- FOR_ALL_INTERFACES
- FOR_ALL_INTERFACES_ADDRESSES
- JSON_FOREACH
+ - FOREACH_BE_TXN_BATCH_IN_LIST
+ - FOREACH_BE_APPLY_BATCH_IN_LIST
+ - FOREACH_BE_TXN_IN_LIST
+ - FOREACH_SESSION_IN_LIST
+ - FOREACH_MGMTD_BE_CLIENT_ID
# libyang
- LY_FOR_KEYS
- LY_LIST_FOR
- FOREACH_SAFI
# ospfd
- LSDB_LOOP
+ # mgmtd
+ - FOREACH_CMT_REC
+ - FOREACH_TXN_CFG_BATCH_IN_LIST
+ - FOREACH_TXN_REQ_IN_LIST
+ - FOREACH_TXN_IN_LIST
+ - FOREACH_MGMTD_DB_ID
+ - FOREACH_ADAPTER_IN_LIST
+ - FOREACH_SESSION_IN_LIST
+ - FOREACH_SESSION_IN_LIST_SAFE
$(AUTOMAKE_DUMMY)install-binPROGRAMS: install-libLTLIBRARIES
$(AUTOMAKE_DUMMY)install-sbinPROGRAMS: install-libLTLIBRARIES
+# Include default rules to compile protobuf message sources
+SUFFIXES += .proto .pb-c.c .pb-c.h
+
+# Rules
+
+AM_V_PROTOC_C = $(am__v_PROTOC_C_$(V))
+am__v_PROTOC_C_ = $(am__v_PROTOC_C_$(AM_DEFAULT_VERBOSITY))
+am__v_PROTOC_C_0 = @echo " PROTOC_C" $@;
+am__v_PROTOC_C_1 =
+
+%.pb-c.c %.pb-c.h : %.proto
+ $(AM_V_PROTOC_C)$(PROTOC_C) -I$(top_srcdir) --c_out=$(top_builddir) $^
+ $(AM_V_GEN)$(SED) -i -e '1i\
+ #include "config.h"' $@
+
include doc/subdir.am
include doc/user/subdir.am
include doc/manpages/subdir.am
include grpc/subdir.am
include tools/subdir.am
+include mgmtd/subdir.am
+
include bgpd/subdir.am
include bgpd/rfp-example/librfp/subdir.am
include bgpd/rfp-example/rfptest/subdir.am
pkgsrc/ripd.sh \
pkgsrc/ripngd.sh \
pkgsrc/zebra.sh \
+ pkgsrc/mgmtd.sh \
# end
endif
snapcraft/helpers \
snapcraft/snap \
babeld/Makefile \
+ mgmtd/Makefile \
bgpd/Makefile \
bgpd/rfp-example/librfp/Makefile \
bgpd/rfp-example/rfptest/Makefile \
$(MAKE) distclean CONFIG_CLEAN_FILES="$(filter-out $(EXTRA_DIST), $(CONFIG_CLEAN_FILES))"
indent:
- tools/indent.py `find sharpd bgpd eigrpd include isisd lib nhrpd ospf6d ospfd pimd qpb ripd vtysh zebra -name '*.[ch]' | grep -v include/linux`
+ tools/indent.py `find sharpd bgpd mgmtd eigrpd include isisd lib nhrpd ospf6d ospfd pimd qpb ripd vtysh zebra -name '*.[ch]' | grep -v include/linux`
if HAVE_GCOV
#include "getopt.h"
#include "if.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "sigevent.h"
#include "lib/version.h"
static void babel_save_state_file(void);
-struct thread_master *master; /* quagga's threads handler */
+struct event_loop *master; /* quagga's threads handler */
struct timeval babel_now; /* current time */
unsigned char myid[8]; /* unique id (mac address of an interface) */
#include "vty.h"
extern struct timeval babel_now; /* current time */
-extern struct thread_master *master; /* quagga's threads handler */
+extern struct event_loop *master; /* quagga's threads handler */
extern int debug;
extern int resend_delay;
DEFINE_MGROUP(BABELD, "babeld");
DEFINE_MTYPE_STATIC(BABELD, BABEL, "Babel Structure");
-static void babel_init_routing_process(struct thread *thread);
+static void babel_init_routing_process(struct event *thread);
static void babel_get_myid(void);
static void babel_initial_noise(void);
-static void babel_read_protocol(struct thread *thread);
-static void babel_main_loop(struct thread *thread);
+static void babel_read_protocol(struct event *thread);
+static void babel_main_loop(struct event *thread);
static void babel_set_timer(struct timeval *timeout);
static void babel_fill_with_next_timeout(struct timeval *tv);
static void
}
/* Threads. */
- thread_add_read(master, babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+ event_add_read(master, babel_read_protocol, NULL, protocol_socket,
+ &babel_routing_process->t_read);
/* wait a little: zebra will announce interfaces, addresses, routes... */
- thread_add_timer_msec(master, babel_init_routing_process, NULL, 200L, &babel_routing_process->t_update);
+ event_add_timer_msec(master, babel_init_routing_process, NULL, 200L,
+ &babel_routing_process->t_update);
/* Distribute list install. */
babel_routing_process->distribute_ctx = distribute_list_ctx_create (vrf_lookup_by_id(VRF_DEFAULT));
}
/* thread reading entries form others babel daemons */
-static void babel_read_protocol(struct thread *thread)
+static void babel_read_protocol(struct event *thread)
{
int rc;
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
}
/* re-add thread */
- thread_add_read(master, &babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+ event_add_read(master, &babel_read_protocol, NULL, protocol_socket,
+ &babel_routing_process->t_read);
}
/* Zebra will give some information, especially about interfaces. This function
must be call with a litte timeout wich may give zebra the time to do his job,
making these inits have sense. */
-static void babel_init_routing_process(struct thread *thread)
+static void babel_init_routing_process(struct event *thread)
{
myseqno = (frr_weak_random() & 0xFFFF);
babel_get_myid();
babel_interface_close_all();
/* cancel events */
- thread_cancel(&babel_routing_process->t_read);
- thread_cancel(&babel_routing_process->t_update);
+ event_cancel(&babel_routing_process->t_read);
+ event_cancel(&babel_routing_process->t_update);
distribute_list_delete(&babel_routing_process->distribute_ctx);
XFREE(MTYPE_BABEL, babel_routing_process);
}
/* Function used with timeout. */
-static void babel_main_loop(struct thread *thread)
+static void babel_main_loop(struct event *thread)
{
struct timeval tv;
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
babel_set_timer(struct timeval *timeout)
{
long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
- thread_cancel(&(babel_routing_process->t_update));
- thread_add_timer_msec(master, babel_main_loop, NULL, msecs, &babel_routing_process->t_update);
+ event_cancel(&(babel_routing_process->t_update));
+ event_add_timer_msec(master, babel_main_loop, NULL, msecs,
+ &babel_routing_process->t_update);
}
void
struct babel
{
/* Babel threads. */
- struct thread *t_read; /* on Babel protocol's socket */
- struct thread *t_update; /* timers */
+ struct event *t_read; /* on Babel protocol's socket */
+ struct event *t_update; /* timers */
/* distribute_ctx */
struct distribute_ctx *distribute_ctx;
};
#include "command.h"
#include "vty.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "nexthop.h"
#include "util.h"
return bfd_key_lookup(key);
}
-void bfd_xmt_cb(struct thread *t)
+void bfd_xmt_cb(struct event *t)
{
- struct bfd_session *bs = THREAD_ARG(t);
+ struct bfd_session *bs = EVENT_ARG(t);
ptm_bfd_xmt_TO(bs, 0);
}
-void bfd_echo_xmt_cb(struct thread *t)
+void bfd_echo_xmt_cb(struct event *t)
{
- struct bfd_session *bs = THREAD_ARG(t);
+ struct bfd_session *bs = EVENT_ARG(t);
if (bs->echo_xmt_TO > 0)
ptm_bfd_echo_xmt_TO(bs);
}
/* Was ptm_bfd_detect_TO() */
-void bfd_recvtimer_cb(struct thread *t)
+void bfd_recvtimer_cb(struct event *t)
{
- struct bfd_session *bs = THREAD_ARG(t);
+ struct bfd_session *bs = EVENT_ARG(t);
switch (bs->ses_state) {
case PTM_BFD_INIT:
}
/* Was ptm_bfd_echo_detect_TO() */
-void bfd_echo_recvtimer_cb(struct thread *t)
+void bfd_echo_recvtimer_cb(struct event *t)
{
- struct bfd_session *bs = THREAD_ARG(t);
+ struct bfd_session *bs = EVENT_ARG(t);
switch (bs->ses_state) {
case PTM_BFD_INIT:
bvrf->bg_echov6 = bp_echov6_socket(vrf);
if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
- &bvrf->bg_ev[0]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+ &bvrf->bg_ev[0]);
if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
- &bvrf->bg_ev[1]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+ &bvrf->bg_ev[1]);
if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
- &bvrf->bg_ev[2]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+ &bvrf->bg_ev[2]);
if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
- &bvrf->bg_ev[3]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+ &bvrf->bg_ev[3]);
if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
- &bvrf->bg_ev[4]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+ &bvrf->bg_ev[4]);
if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
- &bvrf->bg_ev[5]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+ &bvrf->bg_ev[5]);
if (vrf->vrf_id != VRF_DEFAULT) {
bfdd_zclient_register(vrf->vrf_id);
zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id);
/* Disable read/write poll triggering. */
- THREAD_OFF(bvrf->bg_ev[0]);
- THREAD_OFF(bvrf->bg_ev[1]);
- THREAD_OFF(bvrf->bg_ev[2]);
- THREAD_OFF(bvrf->bg_ev[3]);
- THREAD_OFF(bvrf->bg_ev[4]);
- THREAD_OFF(bvrf->bg_ev[5]);
+ EVENT_OFF(bvrf->bg_ev[0]);
+ EVENT_OFF(bvrf->bg_ev[1]);
+ EVENT_OFF(bvrf->bg_ev[2]);
+ EVENT_OFF(bvrf->bg_ev[3]);
+ EVENT_OFF(bvrf->bg_ev[4]);
+ EVENT_OFF(bvrf->bg_ev[5]);
/* Close all descriptors. */
socket_close(&bvrf->bg_echo);
struct bfd_config_timers timers;
struct bfd_timers cur_timers;
uint64_t detect_TO;
- struct thread *echo_recvtimer_ev;
- struct thread *recvtimer_ev;
+ struct event *echo_recvtimer_ev;
+ struct event *recvtimer_ev;
uint64_t xmt_TO;
uint64_t echo_xmt_TO;
- struct thread *xmttimer_ev;
- struct thread *echo_xmttimer_ev;
+ struct event *xmttimer_ev;
+ struct event *echo_xmttimer_ev;
uint64_t echo_detect_TO;
/* software object state */
TAILQ_ENTRY(bfd_control_socket) bcs_entry;
int bcs_sd;
- struct thread *bcs_ev;
- struct thread *bcs_outev;
+ struct event *bcs_ev;
+ struct event *bcs_outev;
struct bcqueue bcs_bcqueue;
/* Notification data */
void control_shutdown(void);
int control_notify(struct bfd_session *bs, uint8_t notify_state);
int control_notify_config(const char *op, struct bfd_session *bs);
-void control_accept(struct thread *t);
+void control_accept(struct event *t);
/*
int bg_echov6;
struct vrf *vrf;
- struct thread *bg_ev[6];
+ struct event *bg_ev[6];
};
/* Forward declaration of data plane context struct. */
struct bfd_global {
int bg_csock;
- struct thread *bg_csockev;
+ struct event *bg_csockev;
struct bcslist bg_bcslist;
struct pllist bg_pllist;
/* Distributed BFD items. */
bool bg_use_dplane;
int bg_dplane_sock;
- struct thread *bg_dplane_sockev;
+ struct event *bg_dplane_sockev;
struct dplane_queue bg_dplaneq;
/* Debug options. */
void ptm_bfd_echo_snd(struct bfd_session *bfd);
void ptm_bfd_echo_fp_snd(struct bfd_session *bfd);
-void bfd_recv_cb(struct thread *t);
+void bfd_recv_cb(struct event *t);
/*
*
* Contains the code related with event loop.
*/
-typedef void (*bfd_ev_cb)(struct thread *t);
+typedef void (*bfd_ev_cb)(struct event *t);
void bfd_recvtimer_update(struct bfd_session *bs);
void bfd_echo_recvtimer_update(struct bfd_session *bs);
unsigned long bfd_get_session_count(void);
/* Export callback functions for `event.c`. */
-extern struct thread_master *master;
+extern struct event_loop *master;
-void bfd_recvtimer_cb(struct thread *t);
-void bfd_echo_recvtimer_cb(struct thread *t);
-void bfd_xmt_cb(struct thread *t);
-void bfd_echo_xmt_cb(struct thread *t);
+void bfd_recvtimer_cb(struct event *t);
+void bfd_echo_recvtimer_cb(struct event *t);
+void bfd_xmt_cb(struct event *t);
+void bfd_echo_xmt_cb(struct event *t);
extern struct in6_addr zero_addr;
static void bfd_sd_reschedule(struct bfd_vrf_global *bvrf, int sd)
{
if (sd == bvrf->bg_shop) {
- THREAD_OFF(bvrf->bg_ev[0]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
- &bvrf->bg_ev[0]);
+ EVENT_OFF(bvrf->bg_ev[0]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+ &bvrf->bg_ev[0]);
} else if (sd == bvrf->bg_mhop) {
- THREAD_OFF(bvrf->bg_ev[1]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
- &bvrf->bg_ev[1]);
+ EVENT_OFF(bvrf->bg_ev[1]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+ &bvrf->bg_ev[1]);
} else if (sd == bvrf->bg_shop6) {
- THREAD_OFF(bvrf->bg_ev[2]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
- &bvrf->bg_ev[2]);
+ EVENT_OFF(bvrf->bg_ev[2]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+ &bvrf->bg_ev[2]);
} else if (sd == bvrf->bg_mhop6) {
- THREAD_OFF(bvrf->bg_ev[3]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
- &bvrf->bg_ev[3]);
+ EVENT_OFF(bvrf->bg_ev[3]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+ &bvrf->bg_ev[3]);
} else if (sd == bvrf->bg_echo) {
- THREAD_OFF(bvrf->bg_ev[4]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
- &bvrf->bg_ev[4]);
+ EVENT_OFF(bvrf->bg_ev[4]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+ &bvrf->bg_ev[4]);
} else if (sd == bvrf->bg_echov6) {
- THREAD_OFF(bvrf->bg_ev[5]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
- &bvrf->bg_ev[5]);
+ EVENT_OFF(bvrf->bg_ev[5]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+ &bvrf->bg_ev[5]);
}
}
mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr);
}
-void bfd_recv_cb(struct thread *t)
+void bfd_recv_cb(struct event *t)
{
- int sd = THREAD_FD(t);
+ int sd = EVENT_FD(t);
struct bfd_session *bfd;
struct bfd_pkt *cp;
bool is_mhop;
struct sockaddr_any local, peer;
uint8_t msgbuf[1516];
struct interface *ifp = NULL;
- struct bfd_vrf_global *bvrf = THREAD_ARG(t);
+ struct bfd_vrf_global *bvrf = EVENT_ARG(t);
/* Schedule next read. */
bfd_sd_reschedule(bvrf, sd);
DEFINE_MTYPE(BFDD, BFDD_NOTIFICATION, "short-lived control notification data");
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* BFDd privileges */
static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_SYS_ADMIN, ZCAP_NET_RAW};
/* Initialize zebra connection. */
bfdd_zclient_init(&bglobal.bfdd_privs);
- thread_add_read(master, control_accept, NULL, bglobal.bg_csock,
- &bglobal.bg_csockev);
+ event_add_read(master, control_accept, NULL, bglobal.bg_csock,
+ &bglobal.bg_csockev);
/* Install commands. */
bfdd_vty_init();
struct bfd_control_socket *control_new(int sd);
static void control_free(struct bfd_control_socket *bcs);
static void control_reset_buf(struct bfd_control_buffer *bcb);
-static void control_read(struct thread *t);
-static void control_write(struct thread *t);
+static void control_read(struct event *t);
+static void control_write(struct event *t);
static void control_handle_request_add(struct bfd_control_socket *bcs,
struct bfd_control_msg *bcm);
{
struct bfd_control_socket *bcs;
- thread_cancel(&bglobal.bg_csockev);
+ event_cancel(&bglobal.bg_csockev);
socket_close(&bglobal.bg_csock);
}
}
-void control_accept(struct thread *t)
+void control_accept(struct event *t)
{
- int csock, sd = THREAD_FD(t);
+ int csock, sd = EVENT_FD(t);
csock = accept(sd, NULL, 0);
if (csock == -1) {
control_new(csock);
- thread_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
+ event_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
}
bcs->bcs_notify = 0;
bcs->bcs_sd = sd;
- thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+ event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
TAILQ_INIT(&bcs->bcs_bcqueue);
TAILQ_INIT(&bcs->bcs_bnplist);
struct bfd_control_queue *bcq;
struct bfd_notify_peer *bnp;
- thread_cancel(&(bcs->bcs_ev));
- thread_cancel(&(bcs->bcs_outev));
+ event_cancel(&(bcs->bcs_ev));
+ event_cancel(&(bcs->bcs_outev));
close(bcs->bcs_sd);
bcs->bcs_bout = &bcq->bcq_bcb;
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return 1;
empty_list:
- thread_cancel(&(bcs->bcs_outev));
+ event_cancel(&(bcs->bcs_outev));
bcs->bcs_bout = NULL;
return 0;
}
bcs->bcs_bout = bcb;
/* New messages, active write events. */
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
}
return 0;
bcb->bcb_left = 0;
}
-static void control_read(struct thread *t)
+static void control_read(struct event *t)
{
- struct bfd_control_socket *bcs = THREAD_ARG(t);
+ struct bfd_control_socket *bcs = EVENT_ARG(t);
struct bfd_control_buffer *bcb = &bcs->bcs_bin;
int sd = bcs->bcs_sd;
struct bfd_control_msg bcm;
schedule_next_read:
bcs->bcs_ev = NULL;
- thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+ event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
}
-static void control_write(struct thread *t)
+static void control_write(struct event *t)
{
- struct bfd_control_socket *bcs = THREAD_ARG(t);
+ struct bfd_control_socket *bcs = EVENT_ARG(t);
struct bfd_control_buffer *bcb = bcs->bcs_bout;
int sd = bcs->bcs_sd;
ssize_t bwrite;
if (bwrite < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs,
- bcs->bcs_sd, &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return;
}
bcb->bcb_left -= bwrite;
if (bcb->bcb_left > 0) {
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return;
}
#include "lib/network.h"
#include "lib/printfrr.h"
#include "lib/stream.h"
-#include "lib/thread.h"
+#include "lib/frrevent.h"
#include "bfd.h"
#include "bfddp_packet.h"
/** Output buffer data. */
struct stream *outbuf;
/** Input event data. */
- struct thread *inbufev;
+ struct event *inbufev;
/** Output event data. */
- struct thread *outbufev;
+ struct event *outbufev;
/** Connection event. */
- struct thread *connectev;
+ struct event *connectev;
/** Amount of bytes read. */
uint64_t in_bytes;
*/
typedef void (*bfd_dplane_expect_cb)(struct bfddp_message *msg, void *arg);
-static void bfd_dplane_client_connect(struct thread *t);
+static void bfd_dplane_client_connect(struct event *t);
static bool bfd_dplane_client_connecting(struct bfd_dplane_ctx *bdc);
static void bfd_dplane_ctx_free(struct bfd_dplane_ctx *bdc);
static int _bfd_dplane_add_session(struct bfd_dplane_ctx *bdc,
stream_pulldown(bdc->outbuf);
/* Disable write ready events. */
- THREAD_OFF(bdc->outbufev);
+ EVENT_OFF(bdc->outbufev);
return total;
}
-static void bfd_dplane_write(struct thread *t)
+static void bfd_dplane_write(struct event *t)
{
- struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
+ struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
/* Handle connection stage. */
if (bdc->connecting && bfd_dplane_client_connecting(bdc))
/* Schedule if it is not yet. */
if (bdc->outbufev == NULL)
- thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
- &bdc->outbufev);
+ event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+ &bdc->outbufev);
return 0;
}
return 0;
}
-static void bfd_dplane_read(struct thread *t)
+static void bfd_dplane_read(struct event *t)
{
- struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
+ struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
int rv;
rv = bfd_dplane_expect(bdc, 0, bfd_dplane_handle_message, NULL);
return;
stream_pulldown(bdc->inbuf);
- thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
}
static void _bfd_session_register_dplane(struct hash_bucket *hb, void *arg)
if (sock == -1)
return bdc;
- thread_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
/* Register all unattached sessions. */
bfd_key_iterate(_bfd_session_register_dplane, bdc);
/* Client mode has special treatment. */
if (bdc->client) {
/* Disable connection event if any. */
- THREAD_OFF(bdc->connectev);
+ EVENT_OFF(bdc->connectev);
/* Normal treatment on shutdown. */
if (bglobal.bg_shutdown)
/* Attempt reconnection. */
socket_close(&bdc->sock);
- THREAD_OFF(bdc->inbufev);
- THREAD_OFF(bdc->outbufev);
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
- &bdc->connectev);
+ EVENT_OFF(bdc->inbufev);
+ EVENT_OFF(bdc->outbufev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
+ &bdc->connectev);
return;
}
socket_close(&bdc->sock);
stream_free(bdc->inbuf);
stream_free(bdc->outbuf);
- THREAD_OFF(bdc->inbufev);
- THREAD_OFF(bdc->outbufev);
+ EVENT_OFF(bdc->inbufev);
+ EVENT_OFF(bdc->outbufev);
XFREE(MTYPE_BFDD_DPLANE_CTX, bdc);
}
/*
* Data plane listening socket.
*/
-static void bfd_dplane_accept(struct thread *t)
+static void bfd_dplane_accept(struct event *t)
{
- struct bfd_global *bg = THREAD_ARG(t);
+ struct bfd_global *bg = EVENT_ARG(t);
struct bfd_dplane_ctx *bdc;
int sock;
zlog_debug("%s: new data plane client connected", __func__);
reschedule_and_return:
- thread_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
- &bglobal.bg_dplane_sockev);
+ event_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
+ &bglobal.bg_dplane_sockev);
}
/*
stream_reset(bdc->outbuf);
/* Ask for read notifications. */
- thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
/* Remove all sessions then register again to send them all. */
bfd_key_iterate(_bfd_session_unregister_dplane, bdc);
}
}
-static void bfd_dplane_client_connect(struct thread *t)
+static void bfd_dplane_client_connect(struct event *t)
{
- struct bfd_dplane_ctx *bdc = THREAD_ARG(t);
+ struct bfd_dplane_ctx *bdc = EVENT_ARG(t);
int rv, sock;
socklen_t rvlen = sizeof(rv);
/* If we are not connected yet, ask for write notifications. */
bdc->connecting = true;
- thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
- &bdc->outbufev);
+ event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+ &bdc->outbufev);
} else {
if (bglobal.debug_dplane)
zlog_debug("%s: server connection: %d", __func__, sock);
}
reschedule_connect:
- THREAD_OFF(bdc->inbufev);
- THREAD_OFF(bdc->outbufev);
+ EVENT_OFF(bdc->inbufev);
+ EVENT_OFF(bdc->outbufev);
socket_close(&sock);
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
- &bdc->connectev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
+ &bdc->connectev);
}
static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)
bdc->client = true;
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 0,
- &bdc->connectev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 0,
+ &bdc->connectev);
/* Insert into data plane lists. */
TAILQ_INSERT_TAIL(&bglobal.bg_dplaneq, bdc, entry);
bfd_dplane_ctx_free(bdc);
/* Cancel accept thread and close socket. */
- THREAD_OFF(bglobal.bg_dplane_sockev);
+ EVENT_OFF(bglobal.bg_dplane_sockev);
close(bglobal.bg_dplane_sock);
return 0;
}
bglobal.bg_dplane_sock = sock;
- thread_add_read(master, bfd_dplane_accept, &bglobal, sock,
- &bglobal.bg_dplane_sockev);
+ event_add_read(master, bfd_dplane_accept, &bglobal, sock,
+ &bglobal.bg_dplane_sockev);
}
int bfd_dplane_add_session(struct bfd_session *bs)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
- &bs->recvtimer_ev);
+ event_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
+ &bs->recvtimer_ev);
}
void bfd_echo_recvtimer_update(struct bfd_session *bs)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
- &bs->echo_recvtimer_ev);
+ event_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
+ &bs->echo_recvtimer_ev);
}
void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
+ event_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
}
void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
- &bs->echo_xmttimer_ev);
+ event_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
+ &bs->echo_xmttimer_ev);
}
void bfd_recvtimer_delete(struct bfd_session *bs)
{
- THREAD_OFF(bs->recvtimer_ev);
+ EVENT_OFF(bs->recvtimer_ev);
}
void bfd_echo_recvtimer_delete(struct bfd_session *bs)
{
- THREAD_OFF(bs->echo_recvtimer_ev);
+ EVENT_OFF(bs->echo_recvtimer_ev);
}
void bfd_xmttimer_delete(struct bfd_session *bs)
{
- THREAD_OFF(bs->xmttimer_ev);
+ EVENT_OFF(bs->xmttimer_ev);
}
void bfd_echo_xmttimer_delete(struct bfd_session *bs)
{
- THREAD_OFF(bs->echo_xmttimer_ev);
+ EVENT_OFF(bs->echo_xmttimer_ev);
}
#include "memory.h"
#include "prefix.h"
#include "hash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "queue.h"
#include "filter.h"
void aspath_finish(void)
{
- hash_clean(ashash, (void (*)(void *))aspath_free);
- hash_free(ashash);
- ashash = NULL;
+ hash_clean_and_free(&ashash, (void (*)(void *))aspath_free);
if (snmp_stream)
stream_free(snmp_stream);
static void cluster_finish(void)
{
- hash_clean(cluster_hash, (void (*)(void *))cluster_free);
- hash_free(cluster_hash);
- cluster_hash = NULL;
+ hash_clean_and_free(&cluster_hash, (void (*)(void *))cluster_free);
}
static struct hash *encap_hash = NULL;
static void encap_finish(void)
{
- hash_clean(encap_hash, (void (*)(void *))encap_free);
- hash_free(encap_hash);
- encap_hash = NULL;
+ hash_clean_and_free(&encap_hash, (void (*)(void *))encap_free);
#ifdef ENABLE_BGP_VNC
- hash_clean(vnc_hash, (void (*)(void *))encap_free);
- hash_free(vnc_hash);
- vnc_hash = NULL;
+ hash_clean_and_free(&vnc_hash, (void (*)(void *))encap_free);
#endif
}
static void srv6_finish(void)
{
- hash_clean(srv6_l3vpn_hash, (void (*)(void *))srv6_l3vpn_free);
- hash_free(srv6_l3vpn_hash);
- srv6_l3vpn_hash = NULL;
- hash_clean(srv6_vpn_hash, (void (*)(void *))srv6_vpn_free);
- hash_free(srv6_vpn_hash);
- srv6_vpn_hash = NULL;
+ hash_clean_and_free(&srv6_l3vpn_hash,
+ (void (*)(void *))srv6_l3vpn_free);
+ hash_clean_and_free(&srv6_vpn_hash, (void (*)(void *))srv6_vpn_free);
}
static unsigned int transit_hash_key_make(const void *p)
static void transit_finish(void)
{
- hash_clean(transit_hash, (void (*)(void *))transit_free);
- hash_free(transit_hash);
- transit_hash = NULL;
+ hash_clean_and_free(&transit_hash, (void (*)(void *))transit_free);
}
/* Attribute hash routines. */
static void attrhash_finish(void)
{
- hash_clean(attrhash, attr_vfree);
- hash_free(attrhash);
- attrhash = NULL;
+ hash_clean_and_free(&attrhash, attr_vfree);
}
static void attr_show_all_iterator(struct hash_bucket *bucket, struct vty *vty)
#include "linklist.h"
#include "memory.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "vrf.h"
}
#endif /* HAVE_BFDD */
-void bgp_bfd_init(struct thread_master *tm)
+void bgp_bfd_init(struct event_loop *tm)
{
/* Initialize BFD client functions */
bfd_protocol_integration_init(zclient, tm);
((((peer)->sort == BGP_PEER_IBGP) && !(peer)->shared_network) \
|| is_ebgp_multihop_configured((peer)))
-extern void bgp_bfd_init(struct thread_master *tm);
+extern void bgp_bfd_init(struct event_loop *tm);
extern void bgp_bfd_peer_config_write(struct vty *vty, const struct peer *peer,
const char *addr);
#include "sockunion.h"
#include "command.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "queue.h"
#include "pullwr.h"
(*cnt)++;
}
-static void bmp_stats(struct thread *thread)
+static void bmp_stats(struct event *thread)
{
- struct bmp_targets *bt = THREAD_ARG(thread);
+ struct bmp_targets *bt = EVENT_ARG(thread);
struct stream *s;
struct peer *peer;
struct listnode *node;
struct timeval tv;
if (bt->stat_msec)
- thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
- &bt->t_stats);
+ event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
gettimeofday(&tv, NULL);
}
/* read from the BMP socket to detect session termination */
-static void bmp_read(struct thread *t)
+static void bmp_read(struct event *t)
{
- struct bmp *bmp = THREAD_ARG(t);
+ struct bmp *bmp = EVENT_ARG(t);
char buf[1024];
ssize_t n;
return;
}
- thread_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
+ event_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
}
static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
bmp->state = BMP_PeerUp;
bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
bmp_wrerr);
- thread_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
+ event_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
bmp_send_initiation(bmp);
return bmp;
}
/* Accept BMP connection. */
-static void bmp_accept(struct thread *thread)
+static void bmp_accept(struct event *thread)
{
union sockunion su;
- struct bmp_listener *bl = THREAD_ARG(thread);
+ struct bmp_listener *bl = EVENT_ARG(thread);
int bmp_sock;
/* We continue hearing BMP socket. */
- thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+ event_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
memset(&su, 0, sizeof(union sockunion));
struct bmp_queue_entry *bqe;
struct bmp_mirrorq *bmq;
- THREAD_OFF(bmp->t_read);
+ EVENT_OFF(bmp->t_read);
if (bmp->active)
bmp_active_disconnected(bmp->active);
if (!bqe->refcount)
XFREE(MTYPE_BMP_QUEUE, bqe);
- THREAD_OFF(bmp->t_read);
+ EVENT_OFF(bmp->t_read);
pullwr_del(bmp->pullwr);
close(bmp->socket);
}
struct bmp *bmp;
struct bmp_active *ba;
- THREAD_OFF(bt->t_stats);
+ EVENT_OFF(bt->t_stats);
frr_each_safe (bmp_actives, &bt->actives, ba)
bmp_active_put(ba);
goto out_sock;
bl->sock = sock;
- thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+ event_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
return;
out_sock:
close(sock);
static void bmp_listener_stop(struct bmp_listener *bl)
{
- THREAD_OFF(bl->t_accept);
+ EVENT_OFF(bl->t_accept);
if (bl->sock != -1)
close(bl->sock);
static void bmp_active_put(struct bmp_active *ba)
{
- THREAD_OFF(ba->t_timer);
- THREAD_OFF(ba->t_read);
- THREAD_OFF(ba->t_write);
+ EVENT_OFF(ba->t_timer);
+ EVENT_OFF(ba->t_read);
+ EVENT_OFF(ba->t_write);
bmp_actives_del(&ba->targets->actives, ba);
bmp_active_connect(ba);
}
-static void bmp_active_thread(struct thread *t)
+static void bmp_active_thread(struct event *t)
{
- struct bmp_active *ba = THREAD_ARG(t);
+ struct bmp_active *ba = EVENT_ARG(t);
socklen_t slen;
int status, ret;
vrf_id_t vrf_id;
/* all 3 end up here, though only timer or read+write are active
* at a time */
- THREAD_OFF(ba->t_timer);
- THREAD_OFF(ba->t_read);
- THREAD_OFF(ba->t_write);
+ EVENT_OFF(ba->t_timer);
+ EVENT_OFF(ba->t_read);
+ EVENT_OFF(ba->t_write);
ba->last_err = NULL;
static void bmp_active_setup(struct bmp_active *ba)
{
- THREAD_OFF(ba->t_timer);
- THREAD_OFF(ba->t_read);
- THREAD_OFF(ba->t_write);
+ EVENT_OFF(ba->t_timer);
+ EVENT_OFF(ba->t_read);
+ EVENT_OFF(ba->t_write);
if (ba->bmp)
return;
ba->curretry = ba->maxretry;
if (ba->socket == -1)
- thread_add_timer_msec(bm->master, bmp_active_thread, ba,
- ba->curretry, &ba->t_timer);
+ event_add_timer_msec(bm->master, bmp_active_thread, ba,
+ ba->curretry, &ba->t_timer);
else {
- thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
- &ba->t_read);
- thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+ event_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_read);
+ event_add_write(bm->master, bmp_active_thread, ba, ba->socket,
&ba->t_write);
}
}
{
VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
- THREAD_OFF(bt->t_stats);
+ EVENT_OFF(bt->t_stats);
if (no)
bt->stat_msec = 0;
else if (interval_str)
bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
if (bt->stat_msec)
- thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
- &bt->t_stats);
+ event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
return CMD_SUCCESS;
}
uptime[0] = '\0';
if (ba->t_timer) {
- long trem = thread_timer_remain_second(
+ long trem = event_timer_remain_second(
ba->t_timer);
peer_uptime(monotime(NULL) - trem,
return 0;
}
-static int bgp_bmp_init(struct thread_master *tm)
+static int bgp_bmp_init(struct event_loop *tm)
{
install_node(&bmp_node);
install_default(BMP_NODE);
int socket;
char remote[SU_ADDRSTRLEN + 6];
- struct thread *t_read;
+ struct event *t_read;
struct pullwr *pullwr;
union sockunion addrs[8];
int socket;
const char *last_err;
- struct thread *t_timer, *t_read, *t_write;
+ struct event *t_timer, *t_read, *t_write;
};
/* config & state for passive / listening sockets */
union sockunion addr;
int port;
- struct thread *t_accept;
+ struct event *t_accept;
int sock;
};
struct bmp_actives_head actives;
- struct thread *t_stats;
+ struct event *t_stats;
struct bmp_session_head sessions;
struct bmp_qhash_head updhash;
void community_finish(void)
{
- hash_clean(comhash, community_hash_free);
- hash_free(comhash);
- comhash = NULL;
+ hash_clean_and_free(&comhash, community_hash_free);
}
static struct community *bgp_aggr_community_lookup(
void bgp_community_alias_finish(void)
{
- hash_clean(bgp_ca_community_hash, bgp_ca_free);
- hash_free(bgp_ca_community_hash);
- hash_clean(bgp_ca_alias_hash, bgp_ca_free);
- hash_free(bgp_ca_alias_hash);
+ hash_clean_and_free(&bgp_ca_community_hash, bgp_ca_free);
+ hash_clean_and_free(&bgp_ca_alias_hash, bgp_ca_free);
}
static void bgp_community_alias_show_iterator(struct hash_bucket *hb,
/* Handler of conditional advertisement timer event.
* Each route in the condition-map is evaluated.
*/
-static void bgp_conditional_adv_timer(struct thread *t)
+static void bgp_conditional_adv_timer(struct event *t)
{
afi_t afi;
safi_t safi;
route_map_result_t ret;
bool advmap_table_changed = false;
- bgp = THREAD_ARG(t);
+ bgp = EVENT_ARG(t);
assert(bgp);
- thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
- bgp->condition_check_period, &bgp->t_condition_check);
+ event_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
+ bgp->condition_check_period, &bgp->t_condition_check);
/* loop through each peer and check if we have peers with
* advmap_table_change attribute set, to make sure we send
}
/* Register for conditional routes polling timer */
- if (!thread_is_scheduled(bgp->t_condition_check))
- thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
- &bgp->t_condition_check);
+ if (!event_is_scheduled(bgp->t_condition_check))
+ event_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
+ &bgp->t_condition_check);
}
void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
}
/* Last filter removed. So cancel conditional routes polling thread. */
- THREAD_OFF(bgp->t_condition_check);
+ EVENT_OFF(bgp->t_condition_check);
}
static void peer_advertise_map_filter_update(struct peer *peer, afi_t afi,
#include "memory.h"
#include "command.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "queue.h"
#include "filter.h"
/* Handler of reuse timer event. Each route in the current reuse-list
is evaluated. RFC2439 Section 4.8.7. */
-static void bgp_reuse_timer(struct thread *t)
+static void bgp_reuse_timer(struct event *t)
{
struct bgp_damp_info *bdi;
struct bgp_damp_info *next;
time_t t_now, t_diff;
- struct bgp_damp_config *bdc = THREAD_ARG(t);
+ struct bgp_damp_config *bdc = EVENT_ARG(t);
bdc->t_reuse = NULL;
- thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
- &bdc->t_reuse);
+ event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+ &bdc->t_reuse);
t_now = monotime(NULL);
bgp_damp_parameter_set(half, reuse, suppress, max, bdc);
/* Register reuse timer. */
- thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
- &bdc->t_reuse);
+ event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+ &bdc->t_reuse);
return 0;
}
return 0;
/* Cancel reuse event. */
- THREAD_OFF(bdc->t_reuse);
+ EVENT_OFF(bdc->t_reuse);
/* Clean BGP dampening information. */
bgp_damp_info_clean(afi, safi);
struct bgp_damp_info *no_reuse_list;
/* Reuse timer thread per-set base. */
- struct thread *t_reuse;
+ struct event *t_reuse;
afi_t afi;
safi_t safi;
#include "sockunion.h"
#include "command.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "queue.h"
#include "memory.h"
char *interval_str;
- struct thread *t_interval;
+ struct event *t_interval;
};
static int bgp_dump_unset(struct bgp_dump *bgp_dump);
-static void bgp_dump_interval_func(struct thread *);
+static void bgp_dump_interval_func(struct event *);
/* BGP packet dump output buffer. */
struct stream *bgp_dump_obuf;
interval = interval
- secs_into_day % interval; /* always > 0 */
}
- thread_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
- interval, &bgp_dump->t_interval);
+ event_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
+ interval, &bgp_dump->t_interval);
} else {
/* One-off dump: execute immediately, don't affect any scheduled
* dumps */
- thread_add_event(bm->master, bgp_dump_interval_func, bgp_dump,
- 0, &bgp_dump->t_interval);
+ event_add_event(bm->master, bgp_dump_interval_func, bgp_dump, 0,
+ &bgp_dump->t_interval);
}
return 0;
return seq;
}
-static void bgp_dump_interval_func(struct thread *t)
+static void bgp_dump_interval_func(struct event *t)
{
struct bgp_dump *bgp_dump;
- bgp_dump = THREAD_ARG(t);
+ bgp_dump = EVENT_ARG(t);
/* Reschedule dump even if file couldn't be opened this time... */
if (bgp_dump_open_file(bgp_dump) != NULL) {
}
/* Removing interval event. */
- THREAD_OFF(bgp_dump->t_interval);
+ EVENT_OFF(bgp_dump->t_interval);
bgp_dump->interval = 0;
void ecommunity_finish(void)
{
- hash_clean(ecomhash, (void (*)(void *))ecommunity_hash_free);
- hash_free(ecomhash);
- ecomhash = NULL;
+ hash_clean_and_free(&ecomhash, (void (*)(void *))ecommunity_hash_free);
}
/* Extended Communities token enum. */
(void (*)(struct hash_bucket *, void *))free_vni_entry,
bgp);
- hash_clean(bgp->import_rt_hash, (void (*)(void *))hash_import_rt_free);
- hash_free(bgp->import_rt_hash);
- bgp->import_rt_hash = NULL;
-
- hash_clean(bgp->vrf_import_rt_hash,
- (void (*)(void *))hash_vrf_import_rt_free);
- hash_free(bgp->vrf_import_rt_hash);
- bgp->vrf_import_rt_hash = NULL;
-
- hash_clean(bgp->vni_svi_hash, (void (*)(void *))hash_evpn_free);
- hash_free(bgp->vni_svi_hash);
- bgp->vni_svi_hash = NULL;
- hash_free(bgp->vnihash);
- bgp->vnihash = NULL;
+ hash_clean_and_free(&bgp->import_rt_hash,
+ (void (*)(void *))hash_import_rt_free);
+
+ hash_clean_and_free(&bgp->vrf_import_rt_hash,
+ (void (*)(void *))hash_vrf_import_rt_free);
+
+ hash_clean_and_free(&bgp->vni_svi_hash,
+ (void (*)(void *))hash_evpn_free);
+
+ /*
+ * Why is the vnihash freed at the top of this function and
+ * then deleted here?
+ */
+ hash_clean_and_free(&bgp->vnihash, NULL);
list_delete(&bgp->vrf_import_rtl);
list_delete(&bgp->vrf_export_rtl);
bool is_local);
esi_t zero_esi_buf, *zero_esi = &zero_esi_buf;
-static void bgp_evpn_run_consistency_checks(struct thread *t);
+static void bgp_evpn_run_consistency_checks(struct event *t);
static void bgp_evpn_path_nh_info_free(struct bgp_path_evpn_nh_info *nh_info);
static void bgp_evpn_path_nh_unlink(struct bgp_path_evpn_nh_info *nh_info);
if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
zlog_debug("periodic consistency checking started");
- thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
- BGP_EVPN_CONS_CHECK_INTERVAL,
- &bgp_mh_info->t_cons_check);
+ event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+ BGP_EVPN_CONS_CHECK_INTERVAL,
+ &bgp_mh_info->t_cons_check);
}
/* queue up the es for background consistency checks */
return proc_cnt;
}
-static void bgp_evpn_run_consistency_checks(struct thread *t)
+static void bgp_evpn_run_consistency_checks(struct event *t)
{
int proc_cnt = 0;
struct listnode *node;
}
/* restart the timer */
- thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+ event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
BGP_EVPN_CONS_CHECK_INTERVAL,
&bgp_mh_info->t_cons_check);
}
bgp_vrf->evpn_nh_table,
(void (*)(struct hash_bucket *, void *))bgp_evpn_nh_flush_cb,
NULL);
- hash_clean(bgp_vrf->evpn_nh_table, (void (*)(void *))hash_evpn_nh_free);
- hash_free(bgp_vrf->evpn_nh_table);
- bgp_vrf->evpn_nh_table = NULL;
+ hash_clean_and_free(&bgp_vrf->evpn_nh_table,
+ (void (*)(void *))hash_evpn_nh_free);
}
static void bgp_evpn_nh_update_ref_pi(struct bgp_evpn_nh *nh)
bgp_evpn_es_local_info_clear(es, true);
}
if (bgp_mh_info->t_cons_check)
- THREAD_OFF(bgp_mh_info->t_cons_check);
+ EVENT_OFF(bgp_mh_info->t_cons_check);
list_delete(&bgp_mh_info->local_es_list);
list_delete(&bgp_mh_info->pend_es_list);
list_delete(&bgp_mh_info->ead_es_export_rtl);
/* List of ESs with pending/periodic processing */
struct list *pend_es_list;
/* periodic timer for running background consistency checks */
- struct thread *t_cons_check;
+ struct event *t_cons_check;
/* config knobs for optimizing or interop */
/* Generate EAD-EVI routes even if the ES is oper-down. This can be
#include "linklist.h"
#include "prefix.h"
#include "sockunion.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "stream.h"
#include "ringbuf.h"
function. */
/* BGP event function. */
-void bgp_event(struct thread *);
+void bgp_event(struct event *event);
/* BGP thread functions. */
-static void bgp_start_timer(struct thread *);
-static void bgp_connect_timer(struct thread *);
-static void bgp_holdtime_timer(struct thread *);
-static void bgp_delayopen_timer(struct thread *);
+static void bgp_start_timer(struct event *event);
+static void bgp_connect_timer(struct event *event);
+static void bgp_holdtime_timer(struct event *event);
+static void bgp_delayopen_timer(struct event *event);
/* BGP FSM functions. */
static enum bgp_fsm_state_progress bgp_start(struct peer *);
*/
bgp_keepalives_off(from_peer);
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_connect);
- THREAD_OFF(peer->t_delayopen);
- THREAD_OFF(peer->t_connect_check_r);
- THREAD_OFF(peer->t_connect_check_w);
- THREAD_OFF(from_peer->t_routeadv);
- THREAD_OFF(from_peer->t_connect);
- THREAD_OFF(from_peer->t_delayopen);
- THREAD_OFF(from_peer->t_connect_check_r);
- THREAD_OFF(from_peer->t_connect_check_w);
- THREAD_OFF(from_peer->t_process_packet);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_connect_check_r);
+ EVENT_OFF(peer->t_connect_check_w);
+ EVENT_OFF(from_peer->t_routeadv);
+ EVENT_OFF(from_peer->t_connect);
+ EVENT_OFF(from_peer->t_delayopen);
+ EVENT_OFF(from_peer->t_connect_check_r);
+ EVENT_OFF(from_peer->t_connect_check_w);
+ EVENT_OFF(from_peer->t_process_packet);
/*
* At this point in time, it is possible that there are packets pending
bgp_reads_on(peer);
bgp_writes_on(peer);
- thread_add_event(bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet, peer, 0,
+ &peer->t_process_packet);
return (peer);
}
inactive. All other timer must be turned off */
if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
|| peer->bgp->vrf_id == VRF_UNKNOWN) {
- THREAD_OFF(peer->t_start);
+ EVENT_OFF(peer->t_start);
} else {
BGP_TIMER_ON(peer->t_start, bgp_start_timer,
peer->v_start);
}
- THREAD_OFF(peer->t_connect);
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_delayopen);
break;
case Connect:
/* After start timer is expired, the peer moves to Connect
status. Make sure start timer is off and connect timer is
on. */
- THREAD_OFF(peer->t_start);
+ EVENT_OFF(peer->t_start);
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
(peer->v_delayopen + peer->v_connect));
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
break;
case Active:
/* Active is waiting connection from remote peer. And if
connect timer is expired, change status to Connect. */
- THREAD_OFF(peer->t_start);
+ EVENT_OFF(peer->t_start);
/* If peer is passive mode, do not set connect timer. */
if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)
|| CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
- THREAD_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_connect);
} else {
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
}
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
break;
case OpenSent:
/* OpenSent status. */
- THREAD_OFF(peer->t_start);
- THREAD_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_start);
+ EVENT_OFF(peer->t_connect);
if (peer->v_holdtime != 0) {
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
} else {
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
}
bgp_keepalives_off(peer);
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_delayopen);
break;
case OpenConfirm:
/* OpenConfirm status. */
- THREAD_OFF(peer->t_start);
- THREAD_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_start);
+ EVENT_OFF(peer->t_connect);
/*
* If the negotiated Hold Time value is zero, then the Hold Time
* Additionally if a different hold timer has been negotiated
* than we must stop then start the timer again
*/
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
if (peer->v_holdtime == 0)
bgp_keepalives_off(peer);
else {
peer->v_holdtime);
bgp_keepalives_on(peer);
}
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_delayopen);
break;
case Established:
/* In Established status start and connect timer is turned
off. */
- THREAD_OFF(peer->t_start);
- THREAD_OFF(peer->t_connect);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_start);
+ EVENT_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_delayopen);
/*
* Same as OpenConfirm, if holdtime is zero then both holdtime
* Additionally if a different hold timer has been negotiated
* then we must stop then start the timer again
*/
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
if (peer->v_holdtime == 0)
bgp_keepalives_off(peer);
else {
}
break;
case Deleted:
- THREAD_OFF(peer->t_gr_restart);
- THREAD_OFF(peer->t_gr_stale);
+ EVENT_OFF(peer->t_gr_restart);
+ EVENT_OFF(peer->t_gr_stale);
FOREACH_AFI_SAFI (afi, safi)
- THREAD_OFF(peer->t_llgr_stale[afi][safi]);
+ EVENT_OFF(peer->t_llgr_stale[afi][safi]);
- THREAD_OFF(peer->t_pmax_restart);
- THREAD_OFF(peer->t_refresh_stalepath);
+ EVENT_OFF(peer->t_pmax_restart);
+ EVENT_OFF(peer->t_refresh_stalepath);
/* fallthru */
case Clearing:
- THREAD_OFF(peer->t_start);
- THREAD_OFF(peer->t_connect);
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_start);
+ EVENT_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_delayopen);
break;
case BGP_STATUS_MAX:
flog_err(EC_LIB_DEVELOPMENT,
/* BGP start timer. This function set BGP_Start event to thread value
and process event. */
-static void bgp_start_timer(struct thread *thread)
+static void bgp_start_timer(struct event *thread)
{
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (start timer expire).", peer->host);
- THREAD_VAL(thread) = BGP_Start;
+ EVENT_VAL(thread) = BGP_Start;
bgp_event(thread); /* bgp_event unlocks peer */
}
/* BGP connect retry timer. */
-static void bgp_connect_timer(struct thread *thread)
+static void bgp_connect_timer(struct event *thread)
{
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
/* stop the DelayOpenTimer if it is running */
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_delayopen);
assert(!peer->t_write);
assert(!peer->t_read);
if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_stop(peer);
else {
- THREAD_VAL(thread) = ConnectRetry_timer_expired;
+ EVENT_VAL(thread) = ConnectRetry_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
}
/* BGP holdtime timer. */
-static void bgp_holdtime_timer(struct thread *thread)
+static void bgp_holdtime_timer(struct event *thread)
{
atomic_size_t inq_count;
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (holdtime timer expire)",
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
- THREAD_VAL(thread) = Hold_Timer_expired;
+ EVENT_VAL(thread) = Hold_Timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
-void bgp_routeadv_timer(struct thread *thread)
+void bgp_routeadv_timer(struct event *thread)
{
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (routeadv timer expire)",
peer->synctime = monotime(NULL);
- thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
- &peer->t_generate_updgrp_packets);
+ event_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
+ &peer->t_generate_updgrp_packets);
/* MRAI timer will be started again when FIFO is built, no need to
* do it here.
}
/* RFC 4271 DelayOpenTimer */
-void bgp_delayopen_timer(struct thread *thread)
+void bgp_delayopen_timer(struct event *thread)
{
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (DelayOpentimer expire)",
peer->host);
- THREAD_VAL(thread) = DelayOpen_timer_expired;
+ EVENT_VAL(thread) = DelayOpen_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */
}
return;
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
- THREAD_OFF(peer->t_gr_stale);
+ EVENT_OFF(peer->t_gr_stale);
if (peer_dynamic_neighbor(peer) &&
!(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
bgp_timer_set(peer);
}
-static void bgp_llgr_stale_timer_expire(struct thread *thread)
+static void bgp_llgr_stale_timer_expire(struct event *thread)
{
struct peer_af *paf;
struct peer *peer;
afi_t afi;
safi_t safi;
- paf = THREAD_ARG(thread);
+ paf = EVENT_ARG(thread);
peer = paf->peer;
afi = paf->afi;
}
}
-static void bgp_graceful_restart_timer_expire(struct thread *thread)
+static void bgp_graceful_restart_timer_expire(struct event *thread)
{
struct peer *peer, *tmp_peer;
struct listnode *node, *nnode;
afi_t afi;
safi_t safi;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer)) {
zlog_debug("%pBP graceful restart timer expired", peer);
bgp_set_llgr_stale(peer, afi, safi);
bgp_clear_stale_route(peer, afi, safi);
- thread_add_timer(bm->master,
- bgp_llgr_stale_timer_expire, paf,
- peer->llgr[afi][safi].stale_time,
- &peer->t_llgr_stale[afi][safi]);
+ event_add_timer(bm->master, bgp_llgr_stale_timer_expire,
+ paf, peer->llgr[afi][safi].stale_time,
+ &peer->t_llgr_stale[afi][safi]);
for (ALL_LIST_ELEMENTS(peer->bgp->peer, node, nnode,
tmp_peer))
bgp_graceful_restart_timer_off(peer);
}
-static void bgp_graceful_stale_timer_expire(struct thread *thread)
+static void bgp_graceful_stale_timer_expire(struct event *thread)
{
struct peer *peer;
afi_t afi;
safi_t safi;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart stalepath timer expired",
}
/* Selection deferral timer processing function */
-static void bgp_graceful_deferral_timer_expire(struct thread *thread)
+static void bgp_graceful_deferral_timer_expire(struct event *thread)
{
struct afi_safi_info *info;
afi_t afi;
safi_t safi;
struct bgp *bgp;
- info = THREAD_ARG(thread);
+ info = EVENT_ARG(thread);
afi = info->afi;
safi = info->safi;
bgp = info->bgp;
on ending the update delay. */
void bgp_update_delay_end(struct bgp *bgp)
{
- THREAD_OFF(bgp->t_update_delay);
- THREAD_OFF(bgp->t_establish_wait);
+ EVENT_OFF(bgp->t_update_delay);
+ EVENT_OFF(bgp->t_establish_wait);
/* Reset update-delay related state */
bgp->update_delay_over = 1;
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!peer_established(peer))
continue;
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
* different
* duration and schedule write thread immediately.
*/
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
peer->synctime = monotime(NULL);
/* If suppress fib pending is enabled, route is advertised to
*/
diff = difftime(nowtime, peer->last_update);
if (diff > (double)peer->v_routeadv) {
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
return;
}
* (MRAI - m) < r
*/
if (peer->t_routeadv)
- remain = thread_timer_remain_second(peer->t_routeadv);
+ remain = event_timer_remain_second(peer->t_routeadv);
else
remain = peer->v_routeadv;
diff = peer->v_routeadv - diff;
if (diff <= (double)remain) {
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, diff);
}
}
}
/* The maxmed onstartup timer expiry callback. */
-static void bgp_maxmed_onstartup_timer(struct thread *thread)
+static void bgp_maxmed_onstartup_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Max med on startup ended - timer expired.");
- bgp = THREAD_ARG(thread);
- THREAD_OFF(bgp->t_maxmed_onstartup);
+ bgp = EVENT_ARG(thread);
+ EVENT_OFF(bgp->t_maxmed_onstartup);
bgp->maxmed_onstartup_over = 1;
bgp_maxmed_update(bgp);
zlog_info("Begin maxmed onstartup mode - timer %d seconds",
bgp->v_maxmed_onstartup);
- thread_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
- bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
+ event_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
+ bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
if (!bgp->v_maxmed_admin) {
bgp->maxmed_active = 1;
}
/* The update delay timer expiry callback. */
-static void bgp_update_delay_timer(struct thread *thread)
+static void bgp_update_delay_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Update delay ended - timer expired.");
- bgp = THREAD_ARG(thread);
- THREAD_OFF(bgp->t_update_delay);
+ bgp = EVENT_ARG(thread);
+ EVENT_OFF(bgp->t_update_delay);
bgp_update_delay_end(bgp);
}
/* The establish wait timer expiry callback. */
-static void bgp_establish_wait_timer(struct thread *thread)
+static void bgp_establish_wait_timer(struct event *thread)
{
struct bgp *bgp;
zlog_info("Establish wait - timer expired.");
- bgp = THREAD_ARG(thread);
- THREAD_OFF(bgp->t_establish_wait);
+ bgp = EVENT_ARG(thread);
+ EVENT_OFF(bgp->t_establish_wait);
bgp_check_update_delay(bgp);
}
peer->update_delay_over = 0;
/* Start the update-delay timer */
- thread_add_timer(bm->master, bgp_update_delay_timer, bgp,
- bgp->v_update_delay, &bgp->t_update_delay);
+ event_add_timer(bm->master, bgp_update_delay_timer, bgp,
+ bgp->v_update_delay, &bgp->t_update_delay);
if (bgp->v_establish_wait != bgp->v_update_delay)
- thread_add_timer(bm->master, bgp_establish_wait_timer, bgp,
- bgp->v_establish_wait, &bgp->t_establish_wait);
+ event_add_timer(bm->master, bgp_establish_wait_timer, bgp,
+ bgp->v_establish_wait, &bgp->t_establish_wait);
frr_timestamp(3, bgp->update_delay_begin_time,
sizeof(bgp->update_delay_begin_time));
/* graceful restart */
if (peer->t_gr_stale) {
- THREAD_OFF(peer->t_gr_stale);
+ EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
/* Stop route-refresh stalepath timer */
if (peer->t_refresh_stalepath) {
- THREAD_OFF(peer->t_refresh_stalepath);
+ EVENT_OFF(peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
/* There is no pending EOR message */
if (gr_info->eor_required == 0) {
if (gr_info->t_select_deferral) {
- void *info = THREAD_ARG(
+ void *info = EVENT_ARG(
gr_info->t_select_deferral);
XFREE(MTYPE_TMP, info);
}
- THREAD_OFF(gr_info->t_select_deferral);
+ EVENT_OFF(gr_info->t_select_deferral);
gr_info->eor_received = 0;
}
}
bgp_writes_off(peer);
bgp_reads_off(peer);
- THREAD_OFF(peer->t_connect_check_r);
- THREAD_OFF(peer->t_connect_check_w);
+ EVENT_OFF(peer->t_connect_check_r);
+ EVENT_OFF(peer->t_connect_check_w);
/* Stop all timers. */
- THREAD_OFF(peer->t_start);
- THREAD_OFF(peer->t_connect);
- THREAD_OFF(peer->t_holdtime);
- THREAD_OFF(peer->t_routeadv);
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_start);
+ EVENT_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_delayopen);
/* Clear input and output buffer. */
frr_with_mutex (&peer->io_mtx) {
* when the connection is established. A read event is triggered when the
* connection is closed. Thus we need to cancel whichever one did not occur.
*/
-static void bgp_connect_check(struct thread *thread)
+static void bgp_connect_check(struct event *thread)
{
int status;
socklen_t slen;
int ret;
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!peer->t_read);
assert(!peer->t_write);
- THREAD_OFF(peer->t_connect_check_r);
- THREAD_OFF(peer->t_connect_check_w);
+ EVENT_OFF(peer->t_connect_check_r);
+ EVENT_OFF(peer->t_connect_check_w);
/* Check file descriptor. */
slen = sizeof(status);
* bgp_connect_check() as the handler for each and cancel the
* unused event in that function.
*/
- thread_add_read(bm->master, bgp_connect_check, peer, peer->fd,
- &peer->t_connect_check_r);
- thread_add_write(bm->master, bgp_connect_check, peer, peer->fd,
- &peer->t_connect_check_w);
+ event_add_read(bm->master, bgp_connect_check, peer, peer->fd,
+ &peer->t_connect_check_r);
+ event_add_write(bm->master, bgp_connect_check, peer, peer->fd,
+ &peer->t_connect_check_w);
break;
}
return BGP_FSM_SUCCESS;
bgp_fsm_delayopen_timer_expire(struct peer *peer)
{
/* Stop the DelayOpenTimer */
- THREAD_OFF(peer->t_delayopen);
+ EVENT_OFF(peer->t_delayopen);
/* Send open message to peer */
bgp_open_send(peer);
thread_info->safi = safi;
thread_info->bgp = bgp;
- thread_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
- thread_info, bgp->select_defer_time,
- &gr_info->t_select_deferral);
+ event_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
+ thread_info, bgp->select_defer_time,
+ &gr_info->t_select_deferral);
}
gr_info->eor_required++;
/* Send message to RIB indicating route update pending */
else {
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_MODE);
if (peer->t_gr_stale) {
- THREAD_OFF(peer->t_gr_stale);
+ EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
}
if (peer->t_gr_restart) {
- THREAD_OFF(peer->t_gr_restart);
+ EVENT_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
*/
FOREACH_AFI_SAFI (afi, safi) {
if (peer->t_llgr_stale[afi][safi]) {
- THREAD_OFF(peer->t_llgr_stale[afi][safi]);
+ EVENT_OFF(peer->t_llgr_stale[afi][safi]);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Long-lived stale timer stopped for afi/safi: %d/%d",
* of read-only mode.
*/
if (!bgp_update_delay_active(peer->bgp)) {
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
/* Keepalive packet is received. */
static enum bgp_fsm_state_progress bgp_fsm_keepalive(struct peer *peer)
{
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
return BGP_FSM_SUCCESS;
}
/* Update packet is received. */
static enum bgp_fsm_state_progress bgp_fsm_update(struct peer *peer)
{
- THREAD_OFF(peer->t_holdtime);
+ EVENT_OFF(peer->t_holdtime);
return BGP_FSM_SUCCESS;
}
break;
case Connect:
if (!has_valid_nexthops) {
- THREAD_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, TCP_fatal_error);
}
break;
case Active:
if (has_valid_nexthops) {
- THREAD_OFF(peer->t_connect);
+ EVENT_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, ConnectRetry_timer_expired);
}
break;
};
/* Execute event process. */
-void bgp_event(struct thread *thread)
+void bgp_event(struct event *thread)
{
enum bgp_fsm_events event;
struct peer *peer;
- peer = THREAD_ARG(thread);
- event = THREAD_VAL(thread);
+ peer = EVENT_ARG(thread);
+ event = EVENT_VAL(thread);
peer_lock(peer);
bgp_event_update(peer, event);
#define BGP_TIMER_ON(T, F, V) \
do { \
if ((peer->status != Deleted)) \
- thread_add_timer(bm->master, (F), peer, (V), &(T)); \
+ event_add_timer(bm->master, (F), peer, (V), &(T)); \
} while (0)
#define BGP_EVENT_ADD(P, E) \
do { \
if ((P)->status != Deleted) \
- thread_add_event(bm->master, bgp_event, (P), (E), \
- NULL); \
+ event_add_event(bm->master, bgp_event, (P), (E), \
+ NULL); \
} while (0)
#define BGP_EVENT_FLUSH(P) \
do { \
assert(peer); \
- thread_cancel_event_ready(bm->master, (P)); \
+ event_cancel_event_ready(bm->master, (P)); \
} while (0)
-#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
- do { \
- if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
- PEER_ROUTE_ADV_DELAY(peer)) \
- thread_add_timer_msec(bm->master, (F), peer, \
- (BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * 1000),\
- (T)); \
- else \
- thread_add_timer_msec(bm->master, (F), peer, \
- 0, (T)); \
- } while (0) \
+#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
+ do { \
+ if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
+ PEER_ROUTE_ADV_DELAY(peer)) \
+ event_add_timer_msec( \
+ bm->master, (F), peer, \
+ (BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * \
+ 1000), \
+ (T)); \
+ else \
+ event_add_timer_msec(bm->master, (F), peer, 0, (T)); \
+ } while (0)
#define BGP_MSEC_JITTER 10
* Update FSM for peer based on whether we have valid nexthops or not.
*/
extern void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops);
-extern void bgp_event(struct thread *);
+extern void bgp_event(struct event *event);
extern int bgp_event_update(struct peer *, enum bgp_fsm_events event);
extern int bgp_stop(struct peer *peer);
extern void bgp_timer_set(struct peer *);
-extern void bgp_routeadv_timer(struct thread *);
+extern void bgp_routeadv_timer(struct event *event);
extern void bgp_fsm_change_status(struct peer *peer,
enum bgp_fsm_status status);
extern const char *const peer_down_str[];
#include "network.h" // for ERRNO_IO_RETRY
#include "stream.h" // for stream_get_endp, stream_getw_from, str...
#include "ringbuf.h" // for ringbuf_remain, ringbuf_peek, ringbuf_...
-#include "thread.h" // for THREAD_OFF, THREAD_ARG, thread...
+#include "frrevent.h" // for EVENT_OFF, EVENT_ARG, event...
#include "bgpd/bgp_io.h"
#include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events, bgp_type_str
/* forward declarations */
static uint16_t bgp_write(struct peer *);
static uint16_t bgp_read(struct peer *peer, int *code_p);
-static void bgp_process_writes(struct thread *);
-static void bgp_process_reads(struct thread *);
+static void bgp_process_writes(struct event *event);
+static void bgp_process_reads(struct event *event);
static bool validate_header(struct peer *);
/* generic i/o status codes */
assert(!peer->t_connect_check_w);
assert(peer->fd);
- thread_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
- &peer->t_write);
+ event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+ &peer->t_write);
SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
- thread_cancel_async(fpt->master, &peer->t_write, NULL);
- THREAD_OFF(peer->t_generate_updgrp_packets);
+ event_cancel_async(fpt->master, &peer->t_write, NULL);
+ EVENT_OFF(peer->t_generate_updgrp_packets);
UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}
assert(!peer->t_connect_check_w);
assert(peer->fd);
- thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
- &peer->t_read);
+ event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+ &peer->t_read);
SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
- thread_cancel_async(fpt->master, &peer->t_read, NULL);
- THREAD_OFF(peer->t_process_packet);
- THREAD_OFF(peer->t_process_packet_error);
+ event_cancel_async(fpt->master, &peer->t_read, NULL);
+ EVENT_OFF(peer->t_process_packet);
+ EVENT_OFF(peer->t_process_packet_error);
UNSET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}
/*
* Called from I/O pthread when a file descriptor has become ready for writing.
*/
-static void bgp_process_writes(struct thread *thread)
+static void bgp_process_writes(struct event *thread)
{
static struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
uint16_t status;
bool reschedule;
bool fatal = false;
* sent in the update message
*/
if (reschedule) {
- thread_add_write(fpt->master, bgp_process_writes, peer,
- peer->fd, &peer->t_write);
+ event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+ &peer->t_write);
} else if (!fatal) {
BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
bgp_generate_updgrp_packets);
* We read as much data as possible, process as many packets as we can and
* place them on peer->ibuf for secondary processing by the main thread.
*/
-static void bgp_process_reads(struct thread *thread)
+static void bgp_process_reads(struct event *thread)
{
/* clang-format off */
static struct peer *peer; /* peer to read from */
int ret = 1;
/* clang-format on */
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
if (peer->fd < 0 || bm->terminating)
return;
/* Handle the error in the main pthread, include the
* specific state change from 'bgp_read'.
*/
- thread_add_event(bm->master, bgp_packet_process_error,
- peer, code, &peer->t_process_packet_error);
+ event_add_event(bm->master, bgp_packet_process_error, peer,
+ code, &peer->t_process_packet_error);
goto done;
}
if (!ibuf_full)
assert(ringbuf_space(peer->ibuf_work) >= peer->max_packet_size);
- thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
- &peer->t_read);
+ event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+ &peer->t_read);
if (added_pkt)
- thread_add_event(bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet, peer, 0,
+ &peer->t_process_packet);
}
/*
#include "memory.h" // for MTYPE_TMP, XFREE, XCALLOC, XMALLOC
#include "monotime.h" // for monotime, monotime_since
-#include "bgpd/bgpd.h" // for peer, PEER_THREAD_KEEPALIVES_ON, peer...
+#include "bgpd/bgpd.h"	  // for peer, PEER_THREAD_KEEPALIVES_ON, peer...
#include "bgpd/bgp_debug.h" // for bgp_debug_neighbor_events
#include "bgpd/bgp_packet.h" // for bgp_keepalive_send
#include "bgpd/bgp_keepalives.h"
/* Cleanup handler / deinitializer. */
static void bgp_keepalives_finish(void *arg)
{
- if (peerhash) {
- hash_clean(peerhash, pkat_del);
- hash_free(peerhash);
- }
-
- peerhash = NULL;
+ hash_clean_and_free(&peerhash, pkat_del);
pthread_mutex_unlock(peerhash_mtx);
pthread_mutex_destroy(peerhash_mtx);
/*
* The RCU mechanism for each pthread is initialized in a "locked"
* state. That's ok for pthreads using the frr_pthread,
- * thread_fetch event loop, because that event loop unlocks regularly.
+ * event_fetch event loop, because that event loop unlocks regularly.
* For foreign pthreads, the lock needs to be unlocked so that the
* background rcu pthread can run.
*/
#include <zebra.h>
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "zclient.h"
#include "stream.h"
XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
-void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
+void bgp_lp_init(struct event_loop *master, struct labelpool *pool)
{
if (BGP_DEBUG(labelpool, LABELPOOL))
zlog_debug("%s: entry", __func__);
struct timeval starttime;
struct skiplist *timestamps_alloc;
struct skiplist *timestamps_dealloc;
- struct thread *event_thread;
+ struct event *event_thread;
unsigned int counter[LPT_STAT_MAX];
};
return 0;
}
-static void labelpool_test_event_handler(struct thread *thread)
+static void labelpool_test_event_handler(struct event *thread)
{
struct lp_test *tcb;
}
if (tcb->event_thread)
- thread_cancel(&tcb->event_thread);
+ event_cancel(&tcb->event_thread);
lpt_inprogress = false;
}
}
if (tcb->event_thread)
- thread_cancel(&tcb->event_thread);
+ event_cancel(&tcb->event_thread);
memset(tcb, 0, sizeof(*tcb));
uint32_t next_chunksize; /* request this many labels */
};
-extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
+extern void bgp_lp_init(struct event_loop *master, struct labelpool *pool);
extern void bgp_lp_finish(void);
extern void bgp_lp_get(int type, void *labelid,
int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated));
void lcommunity_finish(void)
{
- hash_clean(lcomhash, (void (*)(void *))lcommunity_hash_free);
- hash_free(lcomhash);
- lcomhash = NULL;
+ hash_clean_and_free(&lcomhash, (void (*)(void *))lcommunity_hash_free);
}
/* Get next Large Communities token from the string.
void bgp_mac_finish(void)
{
- hash_clean(bm->self_mac_hash, bgp_mac_hash_free);
- hash_free(bm->self_mac_hash);
+ hash_clean_and_free(&bm->self_mac_hash, bgp_mac_hash_free);
}
static void bgp_mac_hash_interface_string_del(void *val)
#include "vector.h"
#include "command.h"
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include <lib/version.h>
#include "memory.h"
#include "prefix.h"
#include "log.h"
#include "prefix.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "sockunion.h"
#include "sockopt.h"
#include "memory.h"
}
/* Accept bgp connection. */
-static void bgp_accept(struct thread *thread)
+static void bgp_accept(struct event *thread)
{
int bgp_sock;
int accept_sock;
union sockunion su;
- struct bgp_listener *listener = THREAD_ARG(thread);
+ struct bgp_listener *listener = EVENT_ARG(thread);
struct peer *peer;
struct peer *peer1;
char buf[SU_ADDRSTRLEN];
bgp = bgp_lookup_by_name(listener->name);
/* Register accept thread. */
- accept_sock = THREAD_FD(thread);
+ accept_sock = EVENT_FD(thread);
if (accept_sock < 0) {
flog_err_sys(EC_LIB_SOCKET,
"[Error] BGP accept socket fd is negative: %d",
return;
}
- thread_add_read(bm->master, bgp_accept, listener, accept_sock,
- &listener->thread);
+ event_add_read(bm->master, bgp_accept, listener, accept_sock,
+ &listener->thread);
/* Accept client connection. */
bgp_sock = sockunion_accept(accept_sock, &su);
"[Error] accept() failed with error \"%s\" on BGP listener socket %d for BGP instance in VRF \"%s\"; refreshing socket",
safe_strerror(save_errno), accept_sock,
VRF_LOGNAME(vrf));
- THREAD_OFF(listener->thread);
+ EVENT_OFF(listener->thread);
} else {
flog_err_sys(
EC_LIB_SOCKET,
sockopt_tcp_mss_set(bgp_sock, peer1->tcp_mss);
bgp_fsm_change_status(peer1, Active);
- THREAD_OFF(
+ EVENT_OFF(
peer1->t_start); /* created in peer_create() */
if (peer_active(peer1)) {
}
bgp_peer_reg_with_nht(peer);
bgp_fsm_change_status(peer, Active);
- THREAD_OFF(peer->t_start); /* created in peer_create() */
+ EVENT_OFF(peer->t_start); /* created in peer_create() */
SET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
/* Make dummy peer until read Open packet. */
? IPV4_MAX_BITLEN
: IPV6_MAX_BITLEN;
+ if (!BGP_PEER_SU_UNSPEC(peer))
+ bgp_md5_set(peer);
+
bgp_md5_set_connect(peer->fd, &peer->su, prefixlen,
peer->password);
}
listener->bgp = bgp;
memcpy(&listener->su, sa, salen);
- thread_add_read(bm->master, bgp_accept, listener, sock,
- &listener->thread);
+ event_add_read(bm->master, bgp_accept, listener, sock,
+ &listener->thread);
listnode_add(bm->listen_sockets, listener);
return 0;
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp == bgp) {
- THREAD_OFF(listener->thread);
+ EVENT_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp)
continue;
- THREAD_OFF(listener->thread);
+ EVENT_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);
struct bgp_listener {
int fd;
union sockunion su;
- struct thread *thread;
+ struct event *thread;
struct bgp *bgp;
char *name;
};
#include <zebra.h>
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "lib/json.h"
#include "zclient.h"
void bgp_tip_hash_destroy(struct bgp *bgp)
{
- if (bgp->tip_hash == NULL)
- return;
- hash_clean(bgp->tip_hash, bgp_tip_hash_free);
- hash_free(bgp->tip_hash);
- bgp->tip_hash = NULL;
+ hash_clean_and_free(&bgp->tip_hash, bgp_tip_hash_free);
}
/* Add/Update Tunnel-IP entry of bgp martian next-hop table.
void bgp_address_destroy(struct bgp *bgp)
{
- if (bgp->address_hash == NULL)
- return;
- hash_clean(bgp->address_hash, bgp_address_hash_free);
- hash_free(bgp->address_hash);
- bgp->address_hash = NULL;
+ hash_clean_and_free(&bgp->address_hash, bgp_address_hash_free);
}
static void bgp_address_add(struct bgp *bgp, struct connected *ifc,
#include <zebra.h>
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "zclient.h"
#include "stream.h"
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
-static void bgp_nht_ifp_initial(struct thread *thread);
+static void bgp_nht_ifp_initial(struct event *thread);
static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
{
bgp_nht_ifp_handle(ifp, false);
}
-static void bgp_nht_ifp_initial(struct thread *thread)
+static void bgp_nht_ifp_initial(struct event *thread)
{
- ifindex_t ifindex = THREAD_VAL(thread);
- struct bgp *bgp = THREAD_ARG(thread);
+ ifindex_t ifindex = EVENT_VAL(thread);
+ struct bgp *bgp = EVENT_ARG(thread);
struct interface *ifp = if_lookup_by_index(ifindex, bgp->vrf_id);
if (!ifp)
return;
if (bnc->ifindex)
- thread_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
- bnc->ifindex, NULL);
+ event_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
+ bnc->ifindex, NULL);
}
void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
#include "linklist.h"
#include "prefix.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "command.h"
#include "memory.h"
static int bgp_capability_llgr(struct peer *peer,
struct capability_header *caphdr)
{
+/*
+ * +--------------------------------------------------+
+ * | Address Family Identifier (16 bits) |
+ * +--------------------------------------------------+
+ * | Subsequent Address Family Identifier (8 bits) |
+ * +--------------------------------------------------+
+ * | Flags for Address Family (8 bits) |
+ * +--------------------------------------------------+
+ * | Long-lived Stale Time (24 bits) |
+ * +--------------------------------------------------+
+ */
+#define BGP_CAP_LLGR_MIN_PACKET_LEN 7
struct stream *s = BGP_INPUT(peer);
size_t end = stream_get_getp(s) + caphdr->length;
SET_FLAG(peer->cap, PEER_CAP_LLGR_RCV);
- while (stream_get_getp(s) + 4 <= end) {
+ while (stream_get_getp(s) + BGP_CAP_LLGR_MIN_PACKET_LEN <= end) {
afi_t afi;
safi_t safi;
iana_afi_t pkt_afi = stream_getw(s);
#include <zebra.h>
#include <sys/time.h>
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "network.h"
#include "prefix.h"
* update group a peer belongs to, encode this information into packets, and
* enqueue the packets onto the peer's output buffer.
*/
-void bgp_generate_updgrp_packets(struct thread *thread)
+void bgp_generate_updgrp_packets(struct event *thread)
{
- struct peer *peer = THREAD_ARG(thread);
+ struct peer *peer = EVENT_ARG(thread);
struct stream *s;
struct peer_af *paf;
return Receive_KEEPALIVE_message;
}
-static void bgp_refresh_stalepath_timer_expire(struct thread *thread)
+static void bgp_refresh_stalepath_timer_expire(struct event *thread)
{
struct peer_af *paf;
- paf = THREAD_ARG(thread);
+ paf = EVENT_ARG(thread);
afi_t afi = paf->afi;
safi_t safi = paf->safi;
"EOR RCV",
gr_info->eor_received);
if (gr_info->t_select_deferral) {
- void *info = THREAD_ARG(
+ void *info = EVENT_ARG(
gr_info->t_select_deferral);
XFREE(MTYPE_TMP, info);
}
- THREAD_OFF(gr_info->t_select_deferral);
+ EVENT_OFF(gr_info->t_select_deferral);
gr_info->eor_required = 0;
gr_info->eor_received = 0;
/* Best path selection */
}
if (peer_established(peer))
- thread_add_timer(bm->master,
- bgp_refresh_stalepath_timer_expire,
- paf, peer->bgp->stalepath_time,
- &peer->t_refresh_stalepath);
+ event_add_timer(bm->master,
+ bgp_refresh_stalepath_timer_expire, paf,
+ peer->bgp->stalepath_time,
+ &peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
return BGP_PACKET_NOOP;
}
- THREAD_OFF(peer->t_refresh_stalepath);
+ EVENT_OFF(peer->t_refresh_stalepath);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED);
UNSET_FLAG(peer->af_sflags[afi][safi],
* would not, making event flow difficult to understand. Please think twice
* before hacking this.
*
- * Thread type: THREAD_EVENT
+ * Thread type: EVENT_EVENT
* @param thread
* @return 0
*/
-void bgp_process_packet(struct thread *thread)
+void bgp_process_packet(struct event *thread)
{
/* Yes first of all get peer pointer. */
struct peer *peer; // peer
int fsm_update_result; // return code of bgp_event_update()
int mprc; // message processing return code
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
rpkt_quanta_old = atomic_load_explicit(&peer->bgp->rpkt_quanta,
memory_order_relaxed);
fsm_update_result = 0;
frr_with_mutex (&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
- thread_add_event(
- bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet,
+ peer, 0,
+ &peer->t_process_packet);
}
}
}
* having the io pthread try to enqueue fsm events or mess with the peer
* struct.
*/
-void bgp_packet_process_error(struct thread *thread)
+void bgp_packet_process_error(struct event *thread)
{
struct peer *peer;
int code;
- peer = THREAD_ARG(thread);
- code = THREAD_VAL(thread);
+ peer = EVENT_ARG(thread);
+ code = EVENT_VAL(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] BGP error %d on fd %d",
extern int bgp_packet_set_marker(struct stream *s, uint8_t type);
extern void bgp_packet_set_size(struct stream *s);
-extern void bgp_generate_updgrp_packets(struct thread *);
-extern void bgp_process_packet(struct thread *);
+extern void bgp_generate_updgrp_packets(struct event *event);
+extern void bgp_process_packet(struct event *event);
extern void bgp_send_delayed_eor(struct bgp *bgp);
/* Task callback to handle socket error encountered in the io pthread */
-void bgp_packet_process_error(struct thread *thread);
+void bgp_packet_process_error(struct event *thread);
extern struct bgp_notify
bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify);
extern bool bgp_has_graceful_restart_notification(struct peer *peer);
bpm->action = NULL;
}
}
- hash_free(bpm->entry_hash);
+ hash_clean_and_free(&bpm->entry_hash, NULL);
XFREE(MTYPE_PBR_MATCH, bpm);
}
void bgp_pbr_cleanup(struct bgp *bgp)
{
- if (bgp->pbr_match_hash) {
- hash_clean(bgp->pbr_match_hash, bgp_pbr_match_free);
- hash_free(bgp->pbr_match_hash);
- bgp->pbr_match_hash = NULL;
- }
- if (bgp->pbr_rule_hash) {
- hash_clean(bgp->pbr_rule_hash, bgp_pbr_rule_free);
- hash_free(bgp->pbr_rule_hash);
- bgp->pbr_rule_hash = NULL;
- }
- if (bgp->pbr_action_hash) {
- hash_clean(bgp->pbr_action_hash, bgp_pbr_action_free);
- hash_free(bgp->pbr_action_hash);
- bgp->pbr_action_hash = NULL;
- }
+ hash_clean_and_free(&bgp->pbr_match_hash, bgp_pbr_match_free);
+ hash_clean_and_free(&bgp->pbr_rule_hash, bgp_pbr_rule_free);
+ hash_clean_and_free(&bgp->pbr_action_hash, bgp_pbr_action_free);
+
if (bgp->bgp_pbr_cfg == NULL)
return;
+
bgp_pbr_reset(bgp, AFI_IP);
bgp_pbr_reset(bgp, AFI_IP6);
XFREE(MTYPE_PBR, bgp->bgp_pbr_cfg);
#include "buffer.h"
#include "sockunion.h"
#include "plist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "workqueue.h"
#include "queue.h"
#include "memory.h"
return 0;
if (CHECK_FLAG(dest->flags, BGP_NODE_PROCESS_SCHEDULED)) {
- if (BGP_DEBUG(update, UPDATE_OUT))
+ if (BGP_DEBUG(update, UPDATE_OUT)) {
+ table = bgp_dest_table(dest);
+ if (table)
+ bgp = table->bgp;
+
zlog_debug(
- "Route %pBD is in workqueue and being processed, not deferred.",
- dest);
+ "Route %pBD(%s) is in workqueue and being processed, not deferred.",
+ dest, bgp ? bgp->name_pretty : "(Unknown)");
+ }
return 0;
}
bgp->gr_info[afi][safi].gr_deferred++;
SET_FLAG(dest->flags, BGP_NODE_SELECT_DEFER);
if (BGP_DEBUG(update, UPDATE_OUT))
- zlog_debug("DEFER route %pBD, dest %p", dest,
- dest);
+ zlog_debug("DEFER route %pBD(%s), dest %p",
+ dest, bgp->name_pretty, dest);
return 0;
}
}
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
"%pBP [Update:SEND] %pFX is filtered by route-map '%s'",
- peer, p, ROUTE_MAP_OUT_NAME(filter));
+ peer, p,
+ bgp_path_suppressed(pi)
+ ? UNSUPPRESS_MAP_NAME(filter)
+ : ROUTE_MAP_OUT_NAME(filter));
bgp_attr_flush(rmap_path.attr);
return false;
}
return true;
}
-static void bgp_route_select_timer_expire(struct thread *thread)
+static void bgp_route_select_timer_expire(struct event *thread)
{
struct afi_safi_info *info;
afi_t afi;
safi_t safi;
struct bgp *bgp;
- info = THREAD_ARG(thread);
+ info = EVENT_ARG(thread);
afi = info->afi;
safi = info->safi;
bgp = info->bgp;
bgp_path_info_reap(dest, pi);
if (debug)
- zlog_debug("%s: pi %p in holddown", __func__,
- pi);
+ zlog_debug(
+ "%s: %pBD(%s) pi from %s in holddown",
+ __func__, dest, bgp->name_pretty,
+ pi->peer->host);
continue;
}
if (debug)
zlog_debug(
- "%s: pi %p non self peer %s not estab state",
- __func__, pi, pi->peer->host);
+ "%s: %pBD(%s) non self peer %s not estab state",
+ __func__, dest,
+ bgp->name_pretty,
+ pi->peer->host);
continue;
}
&& (!CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))) {
bgp_path_info_unset_flag(dest, pi, BGP_PATH_DMED_CHECK);
if (debug)
- zlog_debug("%s: pi %p dmed", __func__, pi);
+ zlog_debug("%s: %pBD(%s) pi %s dmed", __func__,
+ dest, bgp->name_pretty,
+ pi->peer->host);
continue;
}
if (!bgp_path_info_nexthop_cmp(pi, new_select)) {
if (debug)
zlog_debug(
- "%pBD: %s has the same nexthop as the bestpath, skip it",
- dest, path_buf);
+ "%pBD(%s): %s has the same nexthop as the bestpath, skip it",
+ dest, bgp->name_pretty,
+ path_buf);
continue;
}
if (paths_eq) {
if (debug)
zlog_debug(
- "%pBD: %s is equivalent to the bestpath, add to the multipath list",
- dest, path_buf);
+ "%pBD(%s): %s is equivalent to the bestpath, add to the multipath list",
+ dest, bgp->name_pretty,
+ path_buf);
bgp_mp_list_add(&mp_list, pi);
}
}
debug = bgp_debug_bestpath(dest);
if (debug)
zlog_debug(
- "%s: bgp delete in progress, ignoring event, p=%pBD",
- __func__, dest);
+ "%s: bgp delete in progress, ignoring event, p=%pBD(%s)",
+ __func__, dest, bgp->name_pretty);
return;
}
/* Is it end of initial update? (after startup) */
debug = bgp_debug_bestpath(dest);
if (debug)
- zlog_debug("%s: p=%pBDi(%s) afi=%s, safi=%s start", __func__,
+ zlog_debug("%s: p=%pBD(%s) afi=%s, safi=%s start", __func__,
dest, bgp->name_pretty, afi2str(afi),
safi2str(safi));
*/
if (CHECK_FLAG(dest->flags, BGP_NODE_SELECT_DEFER)) {
if (BGP_DEBUG(update, UPDATE_OUT))
- zlog_debug("SELECT_DEFER flag set for route %p", dest);
+ zlog_debug("SELECT_DEFER flag set for route %p(%s)",
+ dest, bgp->name_pretty);
return;
}
if (bgp_fibupd_safi(safi)
&& !bgp_option_check(BGP_OPT_NO_FIB)) {
- if (BGP_SUPPRESS_FIB_ENABLED(bgp)
- && new_select->sub_type == BGP_ROUTE_NORMAL)
- SET_FLAG(dest->flags,
- BGP_NODE_FIB_INSTALL_PENDING);
-
if (new_select->type == ZEBRA_ROUTE_BGP
&& (new_select->sub_type == BGP_ROUTE_NORMAL
|| new_select->sub_type
if (!bgp->t_rmap_def_originate_eval) {
bgp_lock(bgp);
- thread_add_timer(
+ event_add_timer(
bm->master,
update_group_refresh_default_originate_route_map,
bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,
|| new_select->sub_type == BGP_ROUTE_AGGREGATE
|| new_select->sub_type == BGP_ROUTE_IMPORTED)) {
- if (BGP_SUPPRESS_FIB_ENABLED(bgp))
- SET_FLAG(dest->flags,
- BGP_NODE_FIB_INSTALL_PENDING);
-
/* if this is an evpn imported type-5 prefix,
* we need to withdraw the route first to clear
* the nh neigh and the RMAC entry.
struct afi_safi_info *thread_info;
if (bgp->gr_info[afi][safi].t_route_select) {
- struct thread *t = bgp->gr_info[afi][safi].t_route_select;
+ struct event *t = bgp->gr_info[afi][safi].t_route_select;
- thread_info = THREAD_ARG(t);
+ thread_info = EVENT_ARG(t);
XFREE(MTYPE_TMP, thread_info);
- THREAD_OFF(bgp->gr_info[afi][safi].t_route_select);
+ EVENT_OFF(bgp->gr_info[afi][safi].t_route_select);
}
if (BGP_DEBUG(update, UPDATE_OUT)) {
/* If there are more routes to be processed, start the
* selection timer
*/
- thread_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
+ event_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
BGP_ROUTE_SELECT_DELAY,
&bgp->gr_info[afi][safi].t_route_select);
}
work_queue_add(bgp->process_queue, pqnode);
}
-static void bgp_maximum_prefix_restart_timer(struct thread *thread)
+static void bgp_maximum_prefix_restart_timer(struct event *thread)
{
struct peer *peer;
- peer = THREAD_ARG(thread);
+ peer = EVENT_ARG(thread);
peer->t_pmax_restart = NULL;
if (bgp_debug_neighbor_events(peer))
(type == ZEBRA_ROUTE_BGP && stype == BGP_ROUTE_STATIC) ? true
: false;
+ /* If `bgp allow-martian-nexthop` is turned on, return next-hop
+ * as good.
+ */
+ if (bgp->allow_martian)
+ return false;
+
/*
* Only validated for unicast and multicast currently.
* Also valid for EVPN where the nexthop is an IP address.
bgp_attr_flush(&new_attr);
goto filtered;
}
- /* The flag BGP_NODE_FIB_INSTALL_PENDING is for the following
- * condition :
- * Suppress fib is enabled
- * BGP_OPT_NO_FIB is not enabled
- * Route type is BGP_ROUTE_NORMAL (peer learnt routes)
- * Route is being installed first time (BGP_NODE_FIB_INSTALLED not set)
- */
- if (bgp_fibupd_safi(safi) && BGP_SUPPRESS_FIB_ENABLED(bgp)
- && (sub_type == BGP_ROUTE_NORMAL)
- && (!bgp_option_check(BGP_OPT_NO_FIB))
- && (!CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED)))
- SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
/* If neighbor soo is configured, tag all incoming routes with
* this SoO tag and then filter out advertisements in
if (!paf->t_announce_route)
return;
- THREAD_OFF(paf->t_announce_route);
+ EVENT_OFF(paf->t_announce_route);
}
/*
* Callback that is invoked when the route announcement timer for a
* peer_af expires.
*/
-static void bgp_announce_route_timer_expired(struct thread *t)
+static void bgp_announce_route_timer_expired(struct event *t)
{
struct peer_af *paf;
struct peer *peer;
- paf = THREAD_ARG(t);
+ paf = EVENT_ARG(t);
peer = paf->peer;
if (!peer_established(peer))
* multiple peers and the announcement doesn't happen in the
* vty context.
*/
- thread_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
- (subgrp->peer_count == 1)
- ? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
- : BGP_ANNOUNCE_ROUTE_DELAY_MS,
- &paf->t_announce_route);
+ event_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
+ (subgrp->peer_count == 1)
+ ? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
+ : BGP_ANNOUNCE_ROUTE_DELAY_MS,
+ &paf->t_announce_route);
}
/*
* Without splitting the full job into several part,
* vtysh waits for the job to finish before responding to a BGP command
*/
-static void bgp_soft_reconfig_table_task(struct thread *thread)
+static void bgp_soft_reconfig_table_task(struct event *thread)
{
uint32_t iter, max_iter;
struct bgp_dest *dest;
struct prefix_rd *prd;
struct listnode *node, *nnode;
- table = THREAD_ARG(thread);
+ table = EVENT_ARG(thread);
prd = NULL;
max_iter = SOFT_RECONFIG_TASK_MAX_PREFIX;
*/
if (dest || table->soft_reconfig_init) {
table->soft_reconfig_init = false;
- thread_add_event(bm->master, bgp_soft_reconfig_table_task,
- table, 0, &table->soft_reconfig_thread);
+ event_add_event(bm->master, bgp_soft_reconfig_table_task, table,
+ 0, &table->soft_reconfig_thread);
return;
}
/* we're done, clean up the background iteration context info and
list_delete(&ntable->soft_reconfig_peers);
bgp_soft_reconfig_table_flag(ntable, false);
- THREAD_OFF(ntable->soft_reconfig_thread);
+ EVENT_OFF(ntable->soft_reconfig_thread);
}
}
bgp_soft_reconfig_table_flag(table, true);
if (!table->soft_reconfig_thread)
- thread_add_event(bm->master,
- bgp_soft_reconfig_table_task, table, 0,
- &table->soft_reconfig_thread);
+ event_add_event(bm->master,
+ bgp_soft_reconfig_table_task, table, 0,
+ &table->soft_reconfig_thread);
/* Cancel bgp_announce_route_timer_expired threads.
* bgp_announce_route_timer_expired threads have been scheduled
* to announce routes as soon as the soft_reconfigure process
if (aggregate->community)
community_free(&aggregate->community);
- if (aggregate->community_hash) {
- /* Delete all communities in the hash.
- */
- hash_clean(aggregate->community_hash,
- bgp_aggr_community_remove);
- /* Free up the community_hash.
- */
- hash_free(aggregate->community_hash);
- }
+ hash_clean_and_free(&aggregate->community_hash,
+ bgp_aggr_community_remove);
if (aggregate->ecommunity)
ecommunity_free(&aggregate->ecommunity);
- if (aggregate->ecommunity_hash) {
- /* Delete all ecommunities in the hash.
- */
- hash_clean(aggregate->ecommunity_hash,
- bgp_aggr_ecommunity_remove);
- /* Free up the ecommunity_hash.
- */
- hash_free(aggregate->ecommunity_hash);
- }
+ hash_clean_and_free(&aggregate->ecommunity_hash,
+ bgp_aggr_ecommunity_remove);
if (aggregate->lcommunity)
lcommunity_free(&aggregate->lcommunity);
- if (aggregate->lcommunity_hash) {
- /* Delete all lcommunities in the hash.
- */
- hash_clean(aggregate->lcommunity_hash,
- bgp_aggr_lcommunity_remove);
- /* Free up the lcommunity_hash.
- */
- hash_free(aggregate->lcommunity_hash);
- }
+ hash_clean_and_free(&aggregate->lcommunity_hash,
+ bgp_aggr_lcommunity_remove);
if (aggregate->aspath)
aspath_free(aggregate->aspath);
- if (aggregate->aspath_hash) {
- /* Delete all as-paths in the hash.
- */
- hash_clean(aggregate->aspath_hash,
- bgp_aggr_aspath_remove);
- /* Free up the aspath_hash.
- */
- hash_free(aggregate->aspath_hash);
- }
+ hash_clean_and_free(&aggregate->aspath_hash, bgp_aggr_aspath_remove);
bgp_aggregate_free(aggregate);
bgp_dest_unlock_node(dest);
if (path->peer->t_gr_restart &&
CHECK_FLAG(path->flags, BGP_PATH_STALE)) {
unsigned long gr_remaining =
- thread_timer_remain_second(path->peer->t_gr_restart);
+ event_timer_remain_second(path->peer->t_gr_restart);
if (json_paths) {
json_object_int_add(json_path,
bgp_attr_get_community(attr) &&
community_include(bgp_attr_get_community(attr),
COMMUNITY_LLGR_STALE)) {
- unsigned long llgr_remaining = thread_timer_remain_second(
+ unsigned long llgr_remaining = event_timer_remain_second(
path->peer->t_llgr_stale[afi][safi]);
if (json_paths) {
}
}
-static void bgp_table_stats_walker(struct thread *t)
+static void bgp_table_stats_walker(struct event *t)
{
struct bgp_dest *dest, *ndest;
struct bgp_dest *top;
- struct bgp_table_stats *ts = THREAD_ARG(t);
+ struct bgp_table_stats *ts = EVENT_ARG(t);
unsigned int space = 0;
if (!(top = bgp_table_top(ts->table)))
memset(&ts, 0, sizeof(ts));
ts.table = bgp->rib[afi][safi];
- thread_execute(bm->master, bgp_table_stats_walker, &ts, 0);
+ event_execute(bm->master, bgp_table_stats_walker, &ts, 0);
for (i = 0; i < BGP_STATS_MAX; i++) {
if ((!json && !table_stats_strs[i][TABLE_STATS_IDX_VTY])
}
}
-static void bgp_peer_count_walker(struct thread *t)
+static void bgp_peer_count_walker(struct event *t)
{
struct bgp_dest *rn, *rm;
const struct bgp_table *table;
- struct peer_pcounts *pc = THREAD_ARG(t);
+ struct peer_pcounts *pc = EVENT_ARG(t);
if (pc->safi == SAFI_MPLS_VPN || pc->safi == SAFI_ENCAP
|| pc->safi == SAFI_EVPN) {
* stats for the thread-walk (i.e. ensure this can't be blamed on
* on just vty_read()).
*/
- thread_execute(bm->master, bgp_peer_count_walker, &pcounts, 0);
+ event_execute(bm->master, bgp_peer_count_walker, &pcounts, 0);
if (use_json) {
json_object_string_add(json, "prefixCountsFor", peer->host);
} export;
struct {
- struct thread *timer;
+ struct event *timer;
void *hme; /* encap monitor, if this is a VPN route */
struct prefix_rd
rd; /* import: route's route-distinguisher */
vpn_policy_routemap_event(rmap_name);
}
-void bgp_route_map_update_timer(struct thread *thread)
+void bgp_route_map_update_timer(struct event *thread)
{
route_map_walk_update_list(bgp_route_map_process_update_cb);
}
/* If new update is received before the current timer timed out,
* turn it off and start a new timer.
*/
- THREAD_OFF(bm->t_rmap_update);
+ EVENT_OFF(bm->t_rmap_update);
/* rmap_update_timer of 0 means don't do route updates */
if (bm->rmap_update_timer) {
- thread_add_timer(bm->master, bgp_route_map_update_timer,
- NULL, bm->rmap_update_timer,
- &bm->t_rmap_update);
+ event_add_timer(bm->master, bgp_route_map_update_timer, NULL,
+ bm->rmap_update_timer, &bm->t_rmap_update);
/* Signal the groups that a route-map update event has
* started */
DEFUN_YANG (set_distance,
set_distance_cmd,
- "set distance (0-255)",
+ "set distance (1-255)",
SET_STR
"BGP Administrative Distance to use\n"
"Distance value\n")
DEFUN_YANG (no_set_distance,
no_set_distance_cmd,
- "no set distance [(0-255)]",
+ "no set distance [(1-255)]",
NO_STR SET_STR
"BGP Administrative Distance to use\n"
"Distance value\n")
#include "command.h"
#include "linklist.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "filter.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "northbound_cli.h"
#include "lib/network.h"
-#include "lib/thread.h"
#include "rtrlib/rtrlib.h"
#include "hook.h"
#include "libfrr.h"
#define RETRY_INTERVAL_DEFAULT 600
#define BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT 3
-static struct thread *t_rpki_sync;
+static struct event *t_rpki_sync;
#define RPKI_DEBUG(...) \
if (rpki_debug) { \
safi_t safi;
};
-static void rpki_revalidate_prefix(struct thread *thread)
+static void rpki_revalidate_prefix(struct event *thread)
{
- struct rpki_revalidate_prefix *rrp = THREAD_ARG(thread);
+ struct rpki_revalidate_prefix *rrp = EVENT_ARG(thread);
struct bgp_dest *match, *node;
match = bgp_table_subtree_lookup(rrp->bgp->rib[rrp->afi][rrp->safi],
XFREE(MTYPE_BGP_RPKI_REVALIDATE, rrp);
}
-static void bgpd_sync_callback(struct thread *thread)
+static void bgpd_sync_callback(struct event *thread)
{
struct bgp *bgp;
struct listnode *node;
struct prefix prefix;
struct pfx_record rec;
- thread_add_read(bm->master, bgpd_sync_callback, NULL,
- rpki_sync_socket_bgpd, NULL);
+ event_add_read(bm->master, bgpd_sync_callback, NULL,
+ rpki_sync_socket_bgpd, NULL);
if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
while (read(rpki_sync_socket_bgpd, &rec,
rrp->prefix = prefix;
rrp->afi = afi;
rrp->safi = safi;
- thread_add_event(bm->master, rpki_revalidate_prefix,
- rrp, 0, &bgp->t_revalidate[afi][safi]);
+ event_add_event(bm->master, rpki_revalidate_prefix, rrp,
+ 0, &bgp->t_revalidate[afi][safi]);
}
}
}
struct peer *peer;
};
-static void bgp_rpki_revalidate_peer(struct thread *thread)
+static void bgp_rpki_revalidate_peer(struct event *thread)
{
- struct rpki_revalidate_peer *rvp = THREAD_ARG(thread);
+ struct rpki_revalidate_peer *rvp = EVENT_ARG(thread);
/*
* Here's the expensive bit of gnomish deviousness
rvp->afi = afi;
rvp->safi = safi;
- thread_add_event(
+ event_add_event(
bm->master, bgp_rpki_revalidate_peer,
rvp, 0,
&peer->t_revalidate_all[afi][safi]);
}
- thread_add_read(bm->master, bgpd_sync_callback, NULL,
- rpki_sync_socket_bgpd, NULL);
+ event_add_read(bm->master, bgpd_sync_callback, NULL,
+ rpki_sync_socket_bgpd, NULL);
return;
}
-static int bgp_rpki_init(struct thread_master *master)
+static int bgp_rpki_init(struct event_loop *master)
{
rpki_debug = false;
rtr_is_running = false;
return 0;
}
-static void sync_expired(struct thread *thread)
+static void sync_expired(struct event *thread)
{
if (!rtr_mgr_conf_in_sync(rtr_config)) {
RPKI_DEBUG("rtr_mgr is not synced, retrying.");
- thread_add_timer(bm->master, sync_expired, NULL,
- BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
- &t_rpki_sync);
+ event_add_timer(bm->master, sync_expired, NULL,
+ BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
+ &t_rpki_sync);
return;
}
return ERROR;
}
- thread_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
+ event_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups);
{
rtr_is_stopping = true;
if (is_running()) {
- THREAD_OFF(t_rpki_sync);
+ EVENT_OFF(t_rpki_sync);
rtr_mgr_stop(rtr_config);
rtr_mgr_free(rtr_config);
rtr_is_running = false;
#include "log.h"
#include "prefix.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
#include "bgpd/bgp_snmp_bgp4v2.h"
#include "bgpd/bgp_mplsvpn_snmp.h"
-static int bgp_snmp_init(struct thread_master *tm)
+static int bgp_snmp_init(struct event_loop *tm)
{
smux_init(tm);
bgp_snmp_bgp4_init(tm);
#include "log.h"
#include "prefix.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
return 0;
}
-int bgp_snmp_bgp4_init(struct thread_master *tm)
+int bgp_snmp_bgp4_init(struct event_loop *tm)
{
REGISTER_MIB("mibII/bgp", bgp_variables, variable, bgp_oid);
return 0;
extern int bgpTrapEstablished(struct peer *peer);
extern int bgpTrapBackwardTransition(struct peer *peer);
-extern int bgp_snmp_bgp4_init(struct thread_master *tm);
+extern int bgp_snmp_bgp4_init(struct event_loop *tm);
#endif /* _FRR_BGP_SNMP_BGP4_H_ */
#include "log.h"
#include "prefix.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "smux.h"
#include "filter.h"
#include "hook.h"
{1, 9, 1, BGP4V2_NLRI_PATH_ATTR_UNKNOWN, 2, 16}},
};
-int bgp_snmp_bgp4v2_init(struct thread_master *tm)
+int bgp_snmp_bgp4v2_init(struct event_loop *tm)
{
REGISTER_MIB("mibII/bgpv2", bgpv2_variables, variable, bgpv2_oid);
return 0;
#define BGP4V2_ESTABLISHED_NOTIFICATION 1
#define BGP4V2_BACKWARD_TRANSITION_NOTIFICATION 2
-extern int bgp_snmp_bgp4v2_init(struct thread_master *tm);
+extern int bgp_snmp_bgp4v2_init(struct event_loop *tm);
#endif /* _FRR_BGP_SNMP_BGP4V2_H_ */
/* soft_reconfig_table in progress */
bool soft_reconfig_init;
- struct thread *soft_reconfig_thread;
+ struct event *soft_reconfig_thread;
/* list of peers on which soft_reconfig_table has to run */
struct list *soft_reconfig_peers;
#include <zebra.h>
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "command.h"
static void sync_delete(struct update_subgroup *subgrp)
{
XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
- if (subgrp->hash) {
- hash_clean(subgrp->hash,
- (void (*)(void *))bgp_advertise_attr_free);
- hash_free(subgrp->hash);
- }
- subgrp->hash = NULL;
+ hash_clean_and_free(&subgrp->hash,
+ (void (*)(void *))bgp_advertise_attr_free);
+
if (subgrp->work)
stream_free(subgrp->work);
subgrp->work = NULL;
if (subgrp->update_group)
UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
- THREAD_OFF(subgrp->t_merge_check);
- THREAD_OFF(subgrp->t_coalesce);
+ EVENT_OFF(subgrp->t_merge_check);
+ EVENT_OFF(subgrp->t_coalesce);
bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
subgroup_clear_table(subgrp);
/*
* update_subgroup_merge_check_thread_cb
*/
-static void update_subgroup_merge_check_thread_cb(struct thread *thread)
+static void update_subgroup_merge_check_thread_cb(struct event *thread)
{
struct update_subgroup *subgrp;
- subgrp = THREAD_ARG(thread);
+ subgrp = EVENT_ARG(thread);
subgrp->t_merge_check = NULL;
return false;
subgrp->t_merge_check = NULL;
- thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
- subgrp, 0, &subgrp->t_merge_check);
+ event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
+ subgrp, 0, &subgrp->t_merge_check);
SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
return UPDWALK_CONTINUE;
}
-void update_group_refresh_default_originate_route_map(struct thread *thread)
+void update_group_refresh_default_originate_route_map(struct event *thread)
{
struct bgp *bgp;
char reason[] = "refresh default-originate route-map";
- bgp = THREAD_ARG(thread);
+ bgp = EVENT_ARG(thread);
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
reason);
- THREAD_OFF(bgp->t_rmap_def_originate_eval);
+ EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp);
}
*/
SUBGRP_FOREACH_PEER (subgrp, paf)
if (peer_established(paf->peer))
- thread_add_timer_msec(
+ event_add_timer_msec(
bm->master, bgp_generate_updgrp_packets,
paf->peer, 0,
&paf->peer->t_generate_updgrp_packets);
/* announcement attribute hash */
struct hash *hash;
- struct thread *t_coalesce;
+ struct event *t_coalesce;
uint32_t v_coalesce;
- struct thread *t_merge_check;
+ struct event *t_merge_check;
/* table version that the subgroup has caught up to. */
uint64_t version;
extern void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx);
extern void update_group_periodic_merge(struct bgp *bgp);
extern void
-update_group_refresh_default_originate_route_map(struct thread *thread);
+update_group_refresh_default_originate_route_map(struct event *thread);
extern void update_group_start_advtimer(struct bgp *bgp);
extern void update_subgroup_inherit_info(struct update_subgroup *to,
#include "memory.h"
#include "prefix.h"
#include "hash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "queue.h"
#include "routemap.h"
#include "filter.h"
update_group_af_walk(bgp, afi, safi, updgrp_show_adj_walkcb, &ctx);
}
-static void subgroup_coalesce_timer(struct thread *thread)
+static void subgroup_coalesce_timer(struct event *thread)
{
struct update_subgroup *subgrp;
struct bgp *bgp;
- subgrp = THREAD_ARG(thread);
+ subgrp = EVENT_ARG(thread);
if (bgp_debug_update(NULL, NULL, subgrp->update_group, 0))
zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes upon coalesce timer expiry(%u ms)",
(SUBGRP_UPDGRP(subgrp))->id, subgrp->id,
SUBGRP_FOREACH_PEER (subgrp, paf) {
peer = PAF_PEER(paf);
- THREAD_OFF(peer->t_routeadv);
+ EVENT_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
* We should wait for the coalesce timer. Arm the timer if not done.
*/
if (!subgrp->t_coalesce) {
- thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
- subgrp, subgrp->v_coalesce,
- &subgrp->t_coalesce);
+ event_add_timer_msec(bm->master, subgroup_coalesce_timer,
+ subgrp, subgrp->v_coalesce,
+ &subgrp->t_coalesce);
}
}
#include <zebra.h>
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "command.h"
#include "buffer.h"
#include "linklist.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "memory.h"
#include "lib_vty.h"
/* Cancel max-med onstartup if its on */
if (bgp->t_maxmed_onstartup) {
- THREAD_OFF(bgp->t_maxmed_onstartup);
+ EVENT_OFF(bgp->t_maxmed_onstartup);
bgp->maxmed_onstartup_over = 1;
}
* fired.
*/
if (!rmap_delay_timer && bm->t_rmap_update) {
- THREAD_OFF(bm->t_rmap_update);
- thread_execute(bm->master, bgp_route_map_update_timer,
- NULL, 0);
+ EVENT_OFF(bm->t_rmap_update);
+ event_execute(bm->master, bgp_route_map_update_timer,
+ NULL, 0);
}
return CMD_SUCCESS;
} else {
if (peer->t_gr_stale != NULL) {
json_object_int_add(json_timer,
"stalePathTimerRemaining",
- thread_timer_remain_second(
+ event_timer_remain_second(
peer->t_gr_stale));
}
json_object_int_add(
json_timer,
"selectionDeferralTimerRemaining",
- thread_timer_remain_second(
+ event_timer_remain_second(
peer->bgp->gr_info[afi][safi]
.t_select_deferral));
}
if (peer->t_gr_stale != NULL)
vty_out(vty,
" Stale Path Remaining(sec): %ld\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
peer->t_gr_stale));
/* Display Configured Selection
* Deferral only when when
NULL)
vty_out(vty,
" Selection Deferral Time Remaining(sec): %ld\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
peer->bgp->gr_info[afi][safi]
.t_select_deferral));
}
if (p->t_gr_restart != NULL)
json_object_int_add(
json_timer, "restartTimerRemaining",
- thread_timer_remain_second(p->t_gr_restart));
+ event_timer_remain_second(p->t_gr_restart));
json_object_object_add(json, "timers", json_timer);
} else {
p->v_gr_restart);
if (p->t_gr_restart != NULL)
vty_out(vty, " Restart Time Remaining(sec): %ld\n",
- thread_timer_remain_second(p->t_gr_restart));
+ event_timer_remain_second(p->t_gr_restart));
if (p->t_gr_restart != NULL) {
vty_out(vty, " Restart Time Remaining(sec): %ld\n",
- thread_timer_remain_second(p->t_gr_restart));
+ event_timer_remain_second(p->t_gr_restart));
}
}
}
json_neigh,
"bgpTimerConfiguredConditionalAdvertisementsSec",
bgp->condition_check_period);
- if (thread_is_scheduled(bgp->t_condition_check))
+ if (event_is_scheduled(bgp->t_condition_check))
json_object_int_add(
json_neigh,
"bgpTimerUntilConditionalAdvertisementsSec",
- thread_timer_remain_second(
+ event_timer_remain_second(
bgp->t_condition_check));
} else {
/* Administrative shutdown. */
vty_out(vty,
" Configured conditional advertisements interval is %d seconds\n",
bgp->condition_check_period);
- if (thread_is_scheduled(bgp->t_condition_check))
+ if (event_is_scheduled(bgp->t_condition_check))
vty_out(vty,
" Time until conditional advertisements begin is %lu seconds\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
bgp->t_condition_check));
}
/* Capability. */
if (p->t_gr_restart)
json_object_int_add(
json_grace, "gracefulRestartTimerMsecs",
- thread_timer_remain_second(p->t_gr_restart) *
+ event_timer_remain_second(p->t_gr_restart) *
1000);
if (p->t_gr_stale)
json_object_int_add(
json_grace, "gracefulStalepathTimerMsecs",
- thread_timer_remain_second(p->t_gr_stale) *
+ event_timer_remain_second(p->t_gr_stale) *
1000);
/* more gr info in new format */
BGP_SHOW_PEER_GR_CAPABILITY(vty, p, json_grace);
if (p->t_gr_restart)
vty_out(vty,
" The remaining time of restart timer is %ld\n",
- thread_timer_remain_second(p->t_gr_restart));
+ event_timer_remain_second(p->t_gr_restart));
if (p->t_gr_stale)
vty_out(vty,
" The remaining time of stalepath timer is %ld\n",
- thread_timer_remain_second(p->t_gr_stale));
+ event_timer_remain_second(p->t_gr_stale));
/* more gr info in new format */
BGP_SHOW_PEER_GR_CAPABILITY(vty, p, NULL);
json_neigh, "reducePrefixNumFrom");
json_object_int_add(json_neigh,
"restartInTimerMsec",
- thread_timer_remain_second(
- p->t_pmax_restart)
- * 1000);
+ event_timer_remain_second(
+ p->t_pmax_restart) *
+ 1000);
} else
vty_out(vty,
" Reduce the no. of prefix from %s, will restart in %ld seconds\n",
- p->host, thread_timer_remain_second(
- p->t_pmax_restart));
+ p->host,
+ event_timer_remain_second(
+ p->t_pmax_restart));
} else {
if (use_json)
json_object_boolean_true_add(
if (p->t_start)
json_object_int_add(
json_neigh, "nextStartTimerDueInMsecs",
- thread_timer_remain_second(p->t_start) * 1000);
+ event_timer_remain_second(p->t_start) * 1000);
if (p->t_connect)
json_object_int_add(
json_neigh, "nextConnectTimerDueInMsecs",
- thread_timer_remain_second(p->t_connect)
- * 1000);
+ event_timer_remain_second(p->t_connect) * 1000);
if (p->t_routeadv) {
json_object_int_add(json_neigh, "mraiInterval",
p->v_routeadv);
json_object_int_add(
json_neigh, "mraiTimerExpireInMsecs",
- thread_timer_remain_second(p->t_routeadv)
- * 1000);
+ event_timer_remain_second(p->t_routeadv) *
+ 1000);
}
if (p->password)
json_object_int_add(json_neigh, "authenticationEnabled",
}
if (p->t_start)
vty_out(vty, "Next start timer due in %ld seconds\n",
- thread_timer_remain_second(p->t_start));
+ event_timer_remain_second(p->t_start));
if (p->t_connect)
vty_out(vty, "Next connect timer due in %ld seconds\n",
- thread_timer_remain_second(p->t_connect));
+ event_timer_remain_second(p->t_connect));
if (p->t_routeadv)
vty_out(vty,
"MRAI (interval %u) timer expires in %ld seconds\n",
p->v_routeadv,
- thread_timer_remain_second(p->t_routeadv));
+ event_timer_remain_second(p->t_routeadv));
if (p->password)
vty_out(vty, "Peer Authentication Enabled\n");
DEFINE_HOOK(bgp_config_end, (struct bgp *bgp), (bgp));
-static struct thread *t_bgp_cfg;
+static struct event *t_bgp_cfg;
bool bgp_config_inprocess(void)
{
- return thread_is_scheduled(t_bgp_cfg);
+ return event_is_scheduled(t_bgp_cfg);
}
-static void bgp_config_finish(struct thread *t)
+static void bgp_config_finish(struct event *t)
{
struct listnode *node;
struct bgp *bgp;
static void bgp_config_start(void)
{
#define BGP_PRE_CONFIG_MAX_WAIT_SECONDS 600
- THREAD_OFF(t_bgp_cfg);
- thread_add_timer(bm->master, bgp_config_finish, NULL,
- BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
+ EVENT_OFF(t_bgp_cfg);
+ event_add_timer(bm->master, bgp_config_finish, NULL,
+ BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
}
/* When we receive a hook the configuration is read,
{
#define BGP_POST_CONFIG_DELAY_SECONDS 1
uint32_t bgp_post_config_delay =
- thread_is_scheduled(bm->t_rmap_update)
- ? thread_timer_remain_second(bm->t_rmap_update)
+ event_is_scheduled(bm->t_rmap_update)
+ ? event_timer_remain_second(bm->t_rmap_update)
: BGP_POST_CONFIG_DELAY_SECONDS;
/* If BGP config processing thread isn't running, then
if (!bgp_config_inprocess())
return;
- THREAD_OFF(t_bgp_cfg);
+ EVENT_OFF(t_bgp_cfg);
/* Start a new timer to make sure we don't send EoR
* before route-maps are processed.
*/
- thread_add_timer(bm->master, bgp_config_finish, NULL,
- bgp_post_config_delay, &t_bgp_cfg);
+ event_add_timer(bm->master, bgp_config_finish, NULL,
+ bgp_post_config_delay, &t_bgp_cfg);
}
static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
#include "sockunion.h"
#include "zclient.h"
#include "routemap.h"
-#include "thread.h"
+#include "frrevent.h"
#include "queue.h"
#include "memory.h"
#include "lib/json.h"
peer->bgp->vrf_id);
}
+ /* Handle peerings via loopbacks. For instance, peer between
+ * 127.0.0.1 and 127.0.0.2. In short, allow peering with self
+ * via 127.0.0.0/8.
+ */
+ if (!ifp && cmd_allow_reserved_ranges_get())
+ ifp = if_get_vrf_loopback(peer->bgp->vrf_id);
+
if (!ifp) {
/*
* BGP views do not currently get proper data
return false;
}
-static struct thread *bgp_tm_thread_connect;
+static struct event *bgp_tm_thread_connect;
static bool bgp_tm_status_connected;
static bool bgp_tm_chunk_obtained;
#define BGP_FLOWSPEC_TABLE_CHUNK 100000
static uint32_t bgp_tm_min, bgp_tm_max, bgp_tm_chunk_size;
struct bgp *bgp_tm_bgp;
-static void bgp_zebra_tm_connect(struct thread *t)
+static void bgp_zebra_tm_connect(struct event *t)
{
struct zclient *zclient;
int delay = 10, ret = 0;
- zclient = THREAD_ARG(t);
+ zclient = EVENT_ARG(t);
if (bgp_tm_status_connected && zclient->sock > 0)
delay = 60;
else {
}
}
}
- thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
- &bgp_tm_thread_connect);
+ event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+ &bgp_tm_thread_connect);
}
bool bgp_zebra_tm_chunk_obtained(void)
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp;
- thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
- &bgp_tm_thread_connect);
+ event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+ &bgp_tm_thread_connect);
}
int bgp_zebra_get_table_range(uint32_t chunk_size,
uint32_t bos = 0;
uint32_t exp = 0;
+ /*
+ * BGP is installing this route and bgp has been configured
+ * to suppress announcements until the route has been installed
+ * let's set the fact that we expect this route to be installed
+ */
+ if (BGP_SUPPRESS_FIB_ENABLED(bgp))
+ SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
+
/* Don't try to install if we're not connected to Zebra or Zebra doesn't
* know of this instance.
*/
struct zapi_route api;
struct peer *peer;
+ /*
+ * If we are withdrawing the route, we don't need to have this
+ * flag set. So unset it.
+ */
+ UNSET_FLAG(info->net->flags, BGP_NODE_FIB_INSTALL_PENDING);
+
/* Don't try to install if we're not connected to Zebra or Zebra doesn't
* know of this instance.
*/
hook_register_prio(if_del, 0, bgp_if_delete_hook);
}
-void bgp_zebra_init(struct thread_master *master, unsigned short instance)
+void bgp_zebra_init(struct event_loop *master, unsigned short instance)
{
zclient_num_connects = 0;
/* Default weight for next hop, if doing weighted ECMP. */
#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1
-extern void bgp_zebra_init(struct thread_master *master,
- unsigned short instance);
+extern void bgp_zebra_init(struct event_loop *master, unsigned short instance);
extern void bgp_if_init(void);
extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
extern uint32_t bgp_zebra_tm_get_id(void);
#include <zebra.h>
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "ringbuf.h"
bgp_timer_set(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
- thread_cancel_event_ready(bm->master, peer);
+ event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
- THREAD_OFF(peer->t_revalidate_all[afi][safi]);
+ EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->t_write);
assert(!peer->t_read);
BGP_EVENT_FLUSH(peer);
peer_dst->v_delayopen = peer_src->v_delayopen;
/* password apply */
- if (peer_src->password && !peer_dst->password)
+ if (peer_src->password) {
+ XFREE(MTYPE_PEER_PASSWORD, peer_dst->password);
peer_dst->password =
XSTRDUP(MTYPE_PEER_PASSWORD, peer_src->password);
+ }
FOREACH_AFI_SAFI (afi, safi) {
peer_dst->afc[afi][safi] = peer_src->afc[afi][safi];
FOREACH_AFI_SAFI_NSF (afi, safi) {
peer->nsf[afi][safi] = 0;
- THREAD_OFF(peer->t_llgr_stale[afi][safi]);
+ EVENT_OFF(peer->t_llgr_stale[afi][safi]);
}
if (peer->t_gr_restart) {
- THREAD_OFF(peer->t_gr_restart);
+ EVENT_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
if (peer->t_gr_stale) {
- THREAD_OFF(peer->t_gr_stale);
+ EVENT_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
- thread_cancel_event_ready(bm->master, peer);
+ event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
- THREAD_OFF(peer->t_revalidate_all[afi][safi]);
+ EVENT_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
return 0;
}
-static void bgp_startup_timer_expire(struct thread *thread)
+static void bgp_startup_timer_expire(struct event *thread)
{
struct bgp *bgp;
- bgp = THREAD_ARG(thread);
+ bgp = EVENT_ARG(thread);
bgp->t_startup = NULL;
}
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
- thread_add_timer(bm->master, bgp_startup_timer_expire, bgp,
- bgp->restart_time, &bgp->t_startup);
+ event_add_timer(bm->master, bgp_startup_timer_expire, bgp,
+ bgp->restart_time, &bgp->t_startup);
/* printable name we can use in debug messages */
if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
- THREAD_OFF(bgp->t_rmap_def_originate_eval);
+ EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
hook_call(bgp_inst_delete, bgp);
FOREACH_AFI_SAFI (afi, safi)
- THREAD_OFF(bgp->t_revalidate[afi][safi]);
+ EVENT_OFF(bgp->t_revalidate[afi][safi]);
- THREAD_OFF(bgp->t_condition_check);
- THREAD_OFF(bgp->t_startup);
- THREAD_OFF(bgp->t_maxmed_onstartup);
- THREAD_OFF(bgp->t_update_delay);
- THREAD_OFF(bgp->t_establish_wait);
+ EVENT_OFF(bgp->t_condition_check);
+ EVENT_OFF(bgp->t_startup);
+ EVENT_OFF(bgp->t_maxmed_onstartup);
+ EVENT_OFF(bgp->t_update_delay);
+ EVENT_OFF(bgp->t_establish_wait);
/* Set flag indicating bgp instance delete in progress */
SET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS);
/* Delete the graceful restart info */
FOREACH_AFI_SAFI (afi, safi) {
- struct thread *t;
+ struct event *t;
gr_info = &bgp->gr_info[afi][safi];
if (!gr_info)
continue;
t = gr_info->t_select_deferral;
if (t) {
- void *info = THREAD_ARG(t);
+ void *info = EVENT_ARG(t);
XFREE(MTYPE_TMP, info);
}
- THREAD_OFF(gr_info->t_select_deferral);
+ EVENT_OFF(gr_info->t_select_deferral);
t = gr_info->t_route_select;
if (t) {
- void *info = THREAD_ARG(t);
+ void *info = EVENT_ARG(t);
XFREE(MTYPE_TMP, info);
}
- THREAD_OFF(gr_info->t_route_select);
+ EVENT_OFF(gr_info->t_route_select);
}
if (BGP_DEBUG(zebra, ZEBRA)) {
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
- THREAD_OFF(bgp->t_rmap_def_originate_eval);
+ EVENT_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
if (bgp->process_queue)
work_queue_free_and_null(&bgp->process_queue);
- thread_master_free_unused(bm->master);
+ event_master_free_unused(bm->master);
bgp_unlock(bgp); /* initial reference */
return 0;
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
- THREAD_OFF(peer->t_pmax_restart);
+ EVENT_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer canceled",
return;
if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) ||
- CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) {
- if (CHECK_FLAG(peer->af_cap[afi][safi],
- PEER_CAP_ORF_PREFIX_SM_ADV) &&
- (CHECK_FLAG(peer->af_cap[afi][safi],
- PEER_CAP_ORF_PREFIX_RM_RCV) ||
- CHECK_FLAG(peer->af_cap[afi][safi],
- PEER_CAP_ORF_PREFIX_RM_OLD_RCV)))
- peer_clear_soft(peer, afi, safi,
- BGP_CLEAR_SOFT_IN_ORF_PREFIX);
- else
- bgp_route_refresh_send(
- peer, afi, safi, 0, 0, 0,
- BGP_ROUTE_REFRESH_NORMAL);
- }
+ CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV))
+ bgp_route_refresh_send(peer, afi, safi, 0, 0, 0,
+ BGP_ROUTE_REFRESH_NORMAL);
}
}
/* If we touch prefix-list, we need to process
* new updates. This is important for ORF to
- * work correctly as well.
+ * work correctly.
*/
- if (peer->afc_nego[afi][safi])
- peer_on_policy_change(peer, afi, safi,
- 0);
+ if (CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ORF_PREFIX_SM_ADV) &&
+ (CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ORF_PREFIX_RM_RCV) ||
+ CHECK_FLAG(
+ peer->af_cap[afi][safi],
+ PEER_CAP_ORF_PREFIX_RM_OLD_RCV)))
+ peer_clear_soft(
+ peer, afi, safi,
+ BGP_CLEAR_SOFT_IN_ORF_PREFIX);
}
}
for (ALL_LIST_ELEMENTS(bgp->group, node, nnode, group)) {
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
- THREAD_OFF(peer->t_pmax_restart);
+ EVENT_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer cancelled",
return buf;
}
-void bgp_master_init(struct thread_master *master, const int buffer_size,
+void bgp_master_init(struct event_loop *master, const int buffer_size,
struct list *addresses)
{
qobj_init();
if (bm->listen_sockets)
list_delete(&bm->listen_sockets);
- THREAD_OFF(bm->t_rmap_update);
+ EVENT_OFF(bm->t_rmap_update);
bgp_mac_finish();
}
struct list *bgp;
/* BGP thread master. */
- struct thread_master *master;
+ struct event_loop *master;
/* Listening sockets */
struct list *listen_sockets;
uint64_t subgrp_idspace;
/* timer to dampen route map changes */
- struct thread *t_rmap_update; /* Handle route map updates */
+ struct event *t_rmap_update; /* Handle route map updates */
uint32_t rmap_update_timer; /* Route map update timer */
#define RMAP_DEFAULT_UPDATE_TIMER 5 /* disabled by default */
/* Count of EOR received */
uint32_t eor_received;
/* Deferral Timer */
- struct thread *t_select_deferral;
+ struct event *t_select_deferral;
/* Routes Deferred */
uint32_t gr_deferred;
/* Best route select */
- struct thread *t_route_select;
+ struct event *t_route_select;
/* AFI, SAFI enabled */
bool af_enabled[AFI_MAX][SAFI_MAX];
/* Route update completed */
struct as_confed *confed_peers;
int confed_peers_cnt;
- struct thread
- *t_startup; /* start-up timer on only once at the beginning */
+ /* start-up timer on only once at the beginning */
+ struct event *t_startup;
uint32_t v_maxmed_onstartup; /* Duration of max-med on start-up */
#define BGP_MAXMED_ONSTARTUP_UNCONFIGURED 0 /* 0 means off, its the default */
uint32_t maxmed_onstartup_value; /* Max-med value when active on
start-up */
- struct thread
- *t_maxmed_onstartup; /* non-null when max-med onstartup is on */
+
+ /* non-null when max-med onstartup is on */
+ struct event *t_maxmed_onstartup;
uint8_t maxmed_onstartup_over; /* Flag to make it effective only once */
bool v_maxmed_admin; /* true/false if max-med administrative is on/off
uint32_t maxmed_value; /* Max-med value when its active */
/* BGP update delay on startup */
- struct thread *t_update_delay;
- struct thread *t_establish_wait;
- struct thread *t_revalidate[AFI_MAX][SAFI_MAX];
+ struct event *t_update_delay;
+ struct event *t_establish_wait;
+ struct event *t_revalidate[AFI_MAX][SAFI_MAX];
uint8_t update_delay_over;
uint8_t main_zebra_update_hold;
struct hash *pbr_action_hash;
/* timer to re-evaluate neighbor default-originate route-maps */
- struct thread *t_rmap_def_originate_eval;
+ struct event *t_rmap_def_originate_eval;
#define RMAP_DEFAULT_ORIGINATE_EVAL_TIMER 5
/* BGP distance configuration. */
/* BGP Conditional advertisement */
uint32_t condition_check_period;
uint32_t condition_filter_count;
- struct thread *t_condition_check;
+ struct event *t_condition_check;
/* BGP VPN SRv6 backend */
bool srv6_enabled;
/*
* Trigger timer for bgp_announce_route().
*/
- struct thread *t_announce_route;
+ struct event *t_announce_route;
afi_t afi;
safi_t safi;
_Atomic uint32_t v_gr_restart;
/* Threads. */
- struct thread *t_read;
- struct thread *t_write;
- struct thread *t_start;
- struct thread *t_connect_check_r;
- struct thread *t_connect_check_w;
- struct thread *t_connect;
- struct thread *t_holdtime;
- struct thread *t_routeadv;
- struct thread *t_delayopen;
- struct thread *t_pmax_restart;
- struct thread *t_gr_restart;
- struct thread *t_gr_stale;
- struct thread *t_llgr_stale[AFI_MAX][SAFI_MAX];
- struct thread *t_revalidate_all[AFI_MAX][SAFI_MAX];
- struct thread *t_generate_updgrp_packets;
- struct thread *t_process_packet;
- struct thread *t_process_packet_error;
- struct thread *t_refresh_stalepath;
+ struct event *t_read;
+ struct event *t_write;
+ struct event *t_start;
+ struct event *t_connect_check_r;
+ struct event *t_connect_check_w;
+ struct event *t_connect;
+ struct event *t_holdtime;
+ struct event *t_routeadv;
+ struct event *t_delayopen;
+ struct event *t_pmax_restart;
+ struct event *t_gr_restart;
+ struct event *t_gr_stale;
+ struct event *t_llgr_stale[AFI_MAX][SAFI_MAX];
+ struct event *t_revalidate_all[AFI_MAX][SAFI_MAX];
+ struct event *t_generate_updgrp_packets;
+ struct event *t_process_packet;
+ struct event *t_process_packet_error;
+ struct event *t_refresh_stalepath;
/* Thread flags. */
_Atomic uint32_t thread_flags;
extern int bgp_config_write(struct vty *);
-extern void bgp_master_init(struct thread_master *master, const int buffer_size,
+extern void bgp_master_init(struct event_loop *master, const int buffer_size,
struct list *addresses);
extern void bgp_init(unsigned short instance);
extern void peer_tx_shutdown_message_set(struct peer *, const char *msg);
extern void peer_tx_shutdown_message_unset(struct peer *);
-extern void bgp_route_map_update_timer(struct thread *thread);
+extern void bgp_route_map_update_timer(struct event *thread);
extern const char *bgp_get_name_by_role(uint8_t role);
extern enum asnotation_mode bgp_get_asnotation(struct bgp *bgp);
* return value:
* rfp_start_val rfp returned value passed on rfp_stop and other rfapi calls
--------------------------------------------*/
-extern void *rfp_start(struct thread_master *master,
- struct rfapi_rfp_cfg **cfgp,
+extern void *rfp_start(struct event_loop *master, struct rfapi_rfp_cfg **cfgp,
struct rfapi_rfp_cb_methods **cbmp);
/*------------------------------------------
#include "bgpd/bgp_nexthop.h"
extern void rfapi_init(void);
-extern void vnc_zebra_init(struct thread_master *master);
+extern void vnc_zebra_init(struct event_loop *master);
extern void vnc_zebra_destroy(void);
extern void rfapi_delete(struct bgp *);
#include "lib/memory.h"
#include "lib/log.h"
#include "lib/skiplist.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/stream.h"
#include "lib/lib_errors.h"
struct agg_node *rn;
int holddown_count = 0;
- int local_count = 0;
int imported_count = 0;
int remote_count = 0;
++holddown_count;
} else {
- if (RFAPI_LOCAL_BI(bpi)) {
- ++local_count;
- } else {
+ if (!RFAPI_LOCAL_BI(bpi)) {
if (RFAPI_DIRECT_IMPORT_BI(
bpi)) {
++imported_count;
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
- THREAD_ARG(bpi->extra->vnc.import.timer);
+ EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- THREAD_OFF(bpi->extra->vnc.import.timer);
+ EVENT_OFF(bpi->extra->vnc.import.timer);
}
next = bpi->next;
/*
* Timer callback for withdraw
*/
-static void rfapiWithdrawTimerVPN(struct thread *t)
+static void rfapiWithdrawTimerVPN(struct event *t)
{
- struct rfapi_withdraw *wcb = THREAD_ARG(t);
+ struct rfapi_withdraw *wcb = EVENT_ARG(t);
struct bgp_path_info *bpi = wcb->info;
struct bgp *bgp = bgp_get_default();
const struct prefix *p;
return 0;
}
-static void rfapiWithdrawTimerEncap(struct thread *t)
+static void rfapiWithdrawTimerEncap(struct event *t)
{
- struct rfapi_withdraw *wcb = THREAD_ARG(t);
+ struct rfapi_withdraw *wcb = EVENT_ARG(t);
struct bgp_path_info *bpi = wcb->info;
int was_first_route = 0;
struct rfapi_monitor_encap *em;
rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
struct agg_node *rn, struct bgp_path_info *bpi,
afi_t afi, safi_t safi,
- void (*timer_service_func)(struct thread *))
+ void (*timer_service_func)(struct event *))
{
uint32_t lifetime;
struct rfapi_withdraw *wcb;
if (lifetime > UINT32_MAX / 1001) {
/* sub-optimal case, but will probably never happen */
bpi->extra->vnc.import.timer = NULL;
- thread_add_timer(bm->master, timer_service_func, wcb, lifetime,
- &bpi->extra->vnc.import.timer);
+ event_add_timer(bm->master, timer_service_func, wcb, lifetime,
+ &bpi->extra->vnc.import.timer);
} else {
static uint32_t jitter;
uint32_t lifetime_msec;
lifetime_msec = (lifetime * 1000) + jitter;
bpi->extra->vnc.import.timer = NULL;
- thread_add_timer_msec(bm->master, timer_service_func, wcb,
- lifetime_msec,
- &bpi->extra->vnc.import.timer);
+ event_add_timer_msec(bm->master, timer_service_func, wcb,
+ lifetime_msec,
+ &bpi->extra->vnc.import.timer);
}
/* re-sort route list (BGP_PATH_REMOVED routes are last) */
struct agg_node *rn, struct bgp_path_info *bpi)
{
struct rfapi_withdraw *wcb;
- struct thread t;
+ struct event t;
/*
* pretend we're an expiring timer
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
- struct rfapi_withdraw *wcb = THREAD_ARG(
+ struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- THREAD_OFF(
- bpi->extra->vnc.import.timer);
+ EVENT_OFF(bpi->extra->vnc.import.timer);
}
if (action == FIF_ACTION_UPDATE) {
* bpi
*/
struct rfapi_withdraw *wcb;
- struct thread t;
+ struct event t;
/*
* pretend we're an expiring timer
__func__);
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
- THREAD_ARG(bpi->extra->vnc.import.timer);
+ EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- THREAD_OFF(bpi->extra->vnc.import.timer);
+ EVENT_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireEncapNow(import_table, rn, bpi);
}
int lockoffset)
{
struct rfapi_withdraw *wcb;
- struct thread t;
+ struct event t;
/*
* pretend we're an expiring timer
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
- struct rfapi_withdraw *wcb = THREAD_ARG(
+ struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- THREAD_OFF(
- bpi->extra->vnc.import.timer);
+ EVENT_OFF(bpi->extra->vnc.import.timer);
import_table->holddown_count[afi] -= 1;
RFAPI_UPDATE_ITABLE_COUNT(
__func__);
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
- THREAD_ARG(bpi->extra->vnc.import.timer);
+ EVENT_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- THREAD_OFF(bpi->extra->vnc.import.timer);
+ EVENT_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireVpnNow(import_table, rn, bpi, 0);
}
struct agg_node *rn;
struct bgp_path_info *bpi;
struct agg_table *rt = NULL;
- void (*timer_service_func)(struct thread *) = NULL;
+ void (*timer_service_func)(struct event *) = NULL;
assert(afi == AFI_IP || afi == AFI_IP6);
continue;
if (bpi->extra->vnc.import.timer) {
struct rfapi_withdraw *wcb =
- THREAD_ARG(
+ EVENT_ARG(
bpi->extra->vnc
.import
.timer);
afi, 1);
XFREE(MTYPE_RFAPI_WITHDRAW,
wcb);
- THREAD_OFF(
- bpi->extra->vnc.import
- .timer);
+ EVENT_OFF(bpi->extra->vnc.import
+ .timer);
}
} else {
if (!delete_active)
#ifndef QUAGGA_HGP_RFAPI_IMPORT_H
#define QUAGGA_HGP_RFAPI_IMPORT_H
-#include "lib/thread.h"
+#include "frrevent.h"
/*
* These are per-rt-import-list
rfapiMonitorDetachImport(m);
}
- THREAD_OFF(m->timer);
+ EVENT_OFF(m->timer);
/*
* remove from rfd list
rfapiMonitorDetachImport(m);
}
- THREAD_OFF(m->timer);
+ EVENT_OFF(m->timer);
XFREE(MTYPE_RFAPI_MONITOR, m);
rn->info = NULL;
#endif
}
- THREAD_OFF(mon_eth->timer);
+ EVENT_OFF(mon_eth->timer);
/*
* remove from rfd list
bgp->rfapi_cfg->flags &= ~BGP_VNC_CONFIG_RESPONSE_REMOVAL_DISABLE;
}
-static void rfapiMonitorTimerExpire(struct thread *t)
+static void rfapiMonitorTimerExpire(struct event *t)
{
- struct rfapi_monitor_vpn *m = THREAD_ARG(t);
+ struct rfapi_monitor_vpn *m = EVENT_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
{
- unsigned long remain = thread_timer_remain_second(m->timer);
+ unsigned long remain = event_timer_remain_second(m->timer);
/* unexpected case, but avoid wraparound problems below */
if (remain > m->rfd->response_lifetime)
if (m->rfd->response_lifetime - remain < 2)
return;
- THREAD_OFF(m->timer);
+ EVENT_OFF(m->timer);
{
char buf[BUFSIZ];
m->rfd->response_lifetime);
}
- thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
- m->rfd->response_lifetime, &m->timer);
+ event_add_timer(bm->master, rfapiMonitorTimerExpire, m,
+ m->rfd->response_lifetime, &m->timer);
}
/*
}
}
-static void rfapiMonitorEthTimerExpire(struct thread *t)
+static void rfapiMonitorEthTimerExpire(struct event *t)
{
- struct rfapi_monitor_eth *m = THREAD_ARG(t);
+ struct rfapi_monitor_eth *m = EVENT_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
{
- unsigned long remain = thread_timer_remain_second(m->timer);
+ unsigned long remain = event_timer_remain_second(m->timer);
/* unexpected case, but avoid wraparound problems below */
if (remain > m->rfd->response_lifetime)
if (m->rfd->response_lifetime - remain < 2)
return;
- THREAD_OFF(m->timer);
+ EVENT_OFF(m->timer);
{
char buf[BUFSIZ];
m->rfd->response_lifetime);
}
- thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
- m->rfd->response_lifetime, &m->timer);
+ event_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
+ m->rfd->response_lifetime, &m->timer);
}
static int mon_eth_cmp(const void *a, const void *b)
rfapiMonitorEthDetachImport(bgp, val);
}
- THREAD_OFF(val->timer);
+ EVENT_OFF(val->timer);
/*
* remove from rfd list
#define RFAPI_MON_FLAG_NEEDCALLBACK 0x00000001 /* deferred callback */
// int dcount; /* debugging counter */
- struct thread *timer;
+ struct event *timer;
};
struct rfapi_monitor_encap {
struct rfapi_descriptor *rfd; /* which NVE requested the route */
struct ethaddr macaddr;
uint32_t logical_net_id;
- struct thread *timer;
+ struct event *timer;
};
/*
struct bgp *bgp = bgp_get_default();
uint32_t t_pfx_active = 0;
- uint32_t t_pfx_deleted = 0;
uint32_t t_ri_active = 0;
uint32_t t_ri_deleted = 0;
afi_t afi;
uint32_t pfx_active = 0;
- uint32_t pfx_deleted = 0;
for (afi = AFI_IP; afi < AFI_MAX; ++afi) {
if (dsl) {
ri_deleted = skiplist_count(dsl);
t_ri_deleted += ri_deleted;
- ++pfx_deleted;
- ++t_pfx_deleted;
}
}
for (rn = agg_route_top(rfd->rib_pending[afi]); rn;
if (goner->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = THREAD_ARG(goner->timer);
- THREAD_OFF(goner->timer);
+ tcb = EVENT_ARG(goner->timer);
+ EVENT_OFF(goner->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
XFREE(MTYPE_RFAPI_INFO, goner);
/*
* remove route from rib
*/
-static void rfapiRibExpireTimer(struct thread *t)
+static void rfapiRibExpireTimer(struct event *t)
{
- struct rfapi_rib_tcb *tcb = THREAD_ARG(t);
+ struct rfapi_rib_tcb *tcb = EVENT_ARG(t);
RFAPI_RIB_CHECK_COUNTS(1, 0);
struct rfapi_rib_tcb *tcb = NULL;
if (ri->timer) {
- tcb = THREAD_ARG(ri->timer);
- THREAD_OFF(ri->timer);
+ tcb = EVENT_ARG(ri->timer);
+ EVENT_OFF(ri->timer);
} else {
tcb = XCALLOC(MTYPE_RFAPI_RECENT_DELETE,
sizeof(struct rfapi_rib_tcb));
vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
ri->lifetime);
- thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
- &ri->timer);
+ event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
+ &ri->timer);
}
extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = THREAD_ARG(ri->timer);
- THREAD_OFF(ri->timer);
+ tcb = EVENT_ARG(ri->timer);
+ EVENT_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
if (ori->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = THREAD_ARG(ori->timer);
- THREAD_OFF(ori->timer);
+ tcb = EVENT_ARG(ori->timer);
+ EVENT_OFF(ori->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = THREAD_ARG(ri->timer);
- THREAD_OFF(ri->timer);
+ tcb = EVENT_ARG(ri->timer);
+ EVENT_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
RFAPI_RIB_CHECK_COUNTS(0, delete_list->count);
int printedheader = 0;
int routes_total = 0;
int nhs_total = 0;
- int prefixes_total = 0;
- int prefixes_displayed = 0;
- int nves_total = 0;
- int nves_with_routes = 0;
int nves_displayed = 0;
int routes_displayed = 0;
int nhs_displayed = 0;
int printednve = 0;
afi_t afi;
- ++nves_total;
- if (rfd->rib_prefix_count)
- ++nves_with_routes;
-
for (afi = AFI_IP; afi < AFI_MAX; ++afi) {
struct agg_node *rn;
routes_total++;
nhs_total += skiplist_count(sl);
- ++prefixes_total;
if (pfx_match && !prefix_match(pfx_match, p)
&& !prefix_match(p, pfx_match))
continue;
- ++prefixes_displayed;
-
if (!printedheader) {
++printedheader;
struct bgp_tea_options *tea_options;
struct rfapi_un_option *un_options;
struct rfapi_vn_option *vn_options;
- struct thread *timer;
+ struct event *timer;
};
/*
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) && bpi->extra
&& bpi->extra->vnc.import.timer) {
- struct thread *t =
- (struct thread *)bpi->extra->vnc.import.timer;
+ struct event *t = (struct event *)bpi->extra->vnc.import.timer;
+
r = snprintf(p, REMAIN, " [%4lu] ",
- thread_timer_remain_second(t));
+ event_timer_remain_second(t));
INCP;
} else {
const char *vty_newline;
int printedheader = 0;
-
- int nves_total = 0;
- int nves_with_queries = 0;
- int nves_displayed = 0;
-
int queries_total = 0;
int queries_displayed = 0;
struct agg_node *rn;
int printedquerier = 0;
-
- ++nves_total;
-
- if (rfd->mon
- || (rfd->mon_eth && skiplist_count(rfd->mon_eth))) {
- ++nves_with_queries;
- } else {
+ if (!rfd->mon &&
+ !(rfd->mon_eth && skiplist_count(rfd->mon_eth)))
continue;
- }
/*
* IP Queries
fp(out, "%-15s %-15s", buf_vn, buf_un);
printedquerier = 1;
-
- ++nves_displayed;
} else
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
rfapiFormatSeconds(
- thread_timer_remain_second(m->timer),
+ event_timer_remain_second(m->timer),
buf_remain, BUFSIZ);
fp(out, " %-15s %-10s\n",
inet_ntop(m->p.family, &m->p.u.prefix,
fp(out, "%-15s %-15s", buf_vn, buf_un);
printedquerier = 1;
-
- ++nves_displayed;
} else
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
- rfapiFormatSeconds(thread_timer_remain_second(
+ rfapiFormatSeconds(event_timer_remain_second(
mon_eth->timer),
buf_remain, BUFSIZ);
fp(out, " %-17s %10d %-10s\n",
time_t age;
char buf_age[BUFSIZ];
- struct thread *t =
- (struct thread *)bpi->extra->vnc.import.timer;
- remaining = thread_timer_remain_second(t);
+ struct event *t = (struct event *)bpi->extra->vnc.import.timer;
+ remaining = event_timer_remain_second(t);
#ifdef RFAPI_REGISTRATIONS_REPORT_AGE
/*
}
if (tun_type != BGP_ENCAP_TYPE_MPLS && bpi->extra) {
uint32_t l = decode_label(&bpi->extra->label[0]);
+
if (!MPLS_LABEL_IS_NULL(l)) {
fp(out, " Label: %d", l);
if (nlines == 1)
* export expiration timer is already running on
* this route: cancel it
*/
- THREAD_OFF(eti->timer);
+ EVENT_OFF(eti->timer);
bgp_update(peer, prefix, /* prefix */
0, /* addpath_id */
bgp_attr_unintern(&iattr);
}
-static void vncExportWithdrawTimer(struct thread *t)
+static void vncExportWithdrawTimer(struct event *t)
{
- struct vnc_export_info *eti = THREAD_ARG(t);
+ struct vnc_export_info *eti = EVENT_ARG(t);
const struct prefix *p = agg_node_get_prefix(eti->node);
/*
if (!eti->timer && eti->lifetime <= INT32_MAX) {
eti->timer = NULL;
- thread_add_timer(bm->master, vncExportWithdrawTimer, eti,
- eti->lifetime, &eti->timer);
+ event_add_timer(bm->master, vncExportWithdrawTimer, eti,
+ eti->lifetime, &eti->timer);
vnc_zlog_debug_verbose(
"%s: set expiration timer for %u seconds", __func__,
eti->lifetime);
* already running on
* this route: cancel it
*/
- THREAD_OFF(eti->timer);
+ EVENT_OFF(eti->timer);
vnc_zlog_debug_verbose(
"%s: calling bgp_update",
ZEBRA_ROUTE_VNC_DIRECT_RH,
BGP_ROUTE_REDISTRIBUTE);
if (eti) {
- THREAD_OFF(eti->timer);
+ EVENT_OFF(eti->timer);
vnc_eti_delete(eti);
}
#define _QUAGGA_VNC_VNC_EXPORT_TABLE_H_
#include "lib/table.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/vty.h"
#include "bgpd/bgpd.h"
uint8_t type;
uint8_t subtype;
uint32_t lifetime;
- struct thread *timer;
+ struct event *timer;
};
extern struct agg_node *vnc_etn_get(struct bgp *bgp, vnc_export_type_t type,
if (RFAPI_HAS_MONITOR_EXTERIOR(rn_interior)) {
- int count = 0; /* debugging */
-
vnc_zlog_debug_verbose(
"%s: has exterior monitor; ext src: %p", __func__,
RFAPI_MONITOR_EXTERIOR(rn_interior)->source);
struct attr new_attr;
uint32_t label = 0;
-
- ++count; /* debugging */
-
assert(bpi_exterior);
assert(pfx_exterior);
* Modeled after bgp_zebra.c'bgp_zebra_init()
* Charriere asks, "Is it possible to carry two?"
*/
-void vnc_zebra_init(struct thread_master *master)
+void vnc_zebra_init(struct event_loop *master)
{
/* Set default values. */
zclient_vnc = zclient_new(master, &zclient_options_default,
struct rfp_instance_t {
struct rfapi_rfp_cfg rfapi_config;
struct rfapi_rfp_cb_methods rfapi_callbacks;
- struct thread_master *master;
+ struct event_loop *master;
uint32_t config_var;
};
* rfp_start_val rfp returned value passed on rfp_stop and rfp_cfg_write
*
--------------------------------------------*/
-void *rfp_start(struct thread_master *master, struct rfapi_rfp_cfg **cfgp,
+void *rfp_start(struct event_loop *master, struct rfapi_rfp_cfg **cfgp,
struct rfapi_rfp_cb_methods **cbmp)
{
memset(&global_rfi, 0, sizeof(global_rfi));
AS_HELP_STRING([--disable-zebra], [do not build zebra daemon]))
AC_ARG_ENABLE([bgpd],
AS_HELP_STRING([--disable-bgpd], [do not build bgpd]))
+AC_ARG_ENABLE([mgmtd],
+ AS_HELP_STRING([--disable-mgmtd], [do not build mgmtd]))
+AC_ARG_ENABLE([mgmtd_local_validations],
+ AS_HELP_STRING([--enable-mgmtd-local-validations], [dev: unimplemented local validation]))
AC_ARG_ENABLE([ripd],
AS_HELP_STRING([--disable-ripd], [do not build ripd]))
AC_ARG_ENABLE([ripngd],
AC_DEFINE([KEEP_OLD_VPN_COMMANDS], [1], [Define for compiling with old vpn commands])
fi
-#
-# End of logic for protobuf support.
-#
-
AC_MSG_CHECKING([if zebra should be configurable to send Route Advertisements])
if test "$enable_rtadv" != "no"; then
AC_MSG_RESULT([yes])
# Logic for protobuf support.
#
PROTO3=false
-if test "$enable_protobuf" = "yes"; then
- # Check for protoc & protoc-c
-
- # protoc is not required, it's only for a "be nice" helper target
- AC_CHECK_PROGS([PROTOC], [protoc], [/bin/false])
+# Enable Protobuf by default at all times.
+# Check for protoc & protoc-c
+# protoc is not required, it's only for a "be nice" helper target
+AC_CHECK_PROGS([PROTOC], [protoc], [/bin/false])
- AC_CHECK_PROGS([PROTOC_C], [protoc-c], [/bin/false])
- if test "$PROTOC_C" = "/bin/false"; then
- AC_MSG_FAILURE([protobuf requested but protoc-c not found. Install protobuf-c.])
- fi
+AC_CHECK_PROGS([PROTOC_C], [protoc-c], [/bin/false])
+if test "$PROTOC_C" = "/bin/false"; then
+	AC_MSG_FAILURE([protoc-c not found but is required; install protobuf-c.])
+fi
- PKG_CHECK_MODULES([PROTOBUF_C], [libprotobuf-c >= 0.14],, [
- AC_MSG_FAILURE([protobuf requested but libprotobuf-c not found. Install protobuf-c.])
- ])
+PKG_CHECK_MODULES([PROTOBUF_C], [libprotobuf-c >= 1.1.0],, [
+ AC_MSG_FAILURE([minimum version (1.1.0) of libprotobuf-c not found. Install minimum required version of protobuf-c.])
+])
+if test "$enable_protobuf3" = "yes"; then
PROTO3=true
AC_CHECK_HEADER([google/protobuf-c/protobuf-c.h],
[AC_CHECK_DECLS(PROTOBUF_C_LABEL_NONE,
[1], [Have Protobuf version 3]),
[PROTO3=false],
[#include <google/protobuf-c/protobuf-c.h>])],
- [PROTO3=false && AC_MSG_FAILURE([protobuf requested but protobuf-c.h not found. Install protobuf-c.])])
-
- AC_DEFINE([HAVE_PROTOBUF], [1], [protobuf])
+ [PROTO3=false && AC_MSG_FAILURE([protobuf3 requested but protobuf-c.h not found. Install protobuf-c.])])
fi
+AC_DEFINE([HAVE_PROTOBUF], [1], [protobuf])
+#
+# End of logic for protobuf support.
+#
+
dnl ---------------------
dnl Integrated VTY option
AC_DEFINE([HAVE_BGPD], [1], [bgpd])
])
+AS_IF([test "$enable_mgmtd" != "no"], [
+
+ AC_DEFINE([HAVE_MGMTD], [1], [mgmtd])
+
+ # Enable MGMTD local validations
+	AS_IF([test "$enable_mgmtd_local_validations" = "yes"], [
+ AC_DEFINE([MGMTD_LOCAL_VALIDATIONS_ENABLED], [1], [Enable mgmtd local validations.])
+ ])
+])
+
AS_IF([test "$enable_ripd" != "no"], [
AC_DEFINE([HAVE_RIPD], [1], [ripd])
])
AC_DEFINE_UNQUOTED([ZEBRA_SERV_PATH], ["$frr_statedir%s%s/zserv.api"], [zebra api socket])
AC_DEFINE_UNQUOTED([BFDD_CONTROL_SOCKET], ["$frr_statedir%s%s/bfdd.sock"], [bfdd control socket])
AC_DEFINE_UNQUOTED([OSPFD_GR_STATE], ["$frr_statedir%s/ospfd-gr.json"], [ospfd GR state information])
+AC_DEFINE_UNQUOTED([MGMTD_FE_SERVER_PATH], ["$frr_statedir/mgmtd_fe.sock"], [mgmtd frontend server socket])
+AC_DEFINE_UNQUOTED([MGMTD_BE_SERVER_PATH], ["$frr_statedir/mgmtd_be.sock"], [mgmtd backend server socket])
AC_DEFINE_UNQUOTED([OSPF6D_GR_STATE], ["$frr_statedir/ospf6d-gr.json"], [ospf6d GR state information])
AC_DEFINE_UNQUOTED([ISISD_RESTART], ["$frr_statedir%s/isid-restart.json"], [isisd restart information])
AC_DEFINE_UNQUOTED([OSPF6_AUTH_SEQ_NUM_FILE], ["$frr_statedir/ospf6d-at-seq-no.dat"], [ospf6d AT Sequence number information])
CFG_SYSCONF="$sysconfdir"
CFG_SBIN="$sbindir"
+CFG_BIN="$bindir"
CFG_STATE="$frr_statedir"
CFG_MODULE="$moduledir"
CFG_YANGMODELS="$yangmodelsdir"
for I in 1 2 3 4 5 6 7 8 9 10; do
eval CFG_SYSCONF="\"$CFG_SYSCONF\""
eval CFG_SBIN="\"$CFG_SBIN\""
+ eval CFG_BIN="\"$CFG_BIN\""
eval CFG_STATE="\"$CFG_STATE\""
eval CFG_MODULE="\"$CFG_MODULE\""
eval CFG_YANGMODELS="\"$CFG_YANGMODELS\""
done
AC_SUBST([CFG_SYSCONF])
AC_SUBST([CFG_SBIN])
+AC_SUBST([CFG_BIN])
AC_SUBST([CFG_STATE])
AC_SUBST([CFG_MODULE])
AC_SUBST([CFG_SCRIPT])
AM_CONDITIONAL([SNMP], [test "$SNMP_METHOD" = "agentx"])
AM_CONDITIONAL([IRDP], [$IRDP])
AM_CONDITIONAL([FPM], [test "$enable_fpm" = "yes"])
-AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" = "yes"])
+AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" != "no"])
AM_CONDITIONAL([HAVE_PROTOBUF3], [$PROTO3])
dnl PCEP plugin
AM_CONDITIONAL([VTYSH], [test "$VTYSH" = "vtysh"])
AM_CONDITIONAL([ZEBRA], [test "$enable_zebra" != "no"])
AM_CONDITIONAL([BGPD], [test "$enable_bgpd" != "no"])
+AM_CONDITIONAL([MGMTD], [test "$enable_mgmtd" != "no"])
AM_CONDITIONAL([RIPD], [test "$enable_ripd" != "no"])
AM_CONDITIONAL([OSPFD], [test "$enable_ospfd" != "no"])
AM_CONDITIONAL([LDPD], [test "$enable_ldpd" != "no"])
alpine/APKBUILD
snapcraft/snapcraft.yaml
lib/version.h
- tests/lib/cli/test_cli.refout
+ tests/lib/cli/test_cli.refout pkgsrc/mgmtd.sh
pkgsrc/bgpd.sh pkgsrc/ospf6d.sh pkgsrc/ospfd.sh
pkgsrc/ripd.sh pkgsrc/ripngd.sh pkgsrc/zebra.sh
pkgsrc/eigrpd.sh])
usr/lib/*/frr/libfrr.*
usr/lib/*/frr/libfrrcares.*
usr/lib/*/frr/libfrrospfapiclient.*
+usr/lib/*/frr/libmgmt_be_nb.*
usr/lib/*/frr/modules/bgpd_bmp.so
usr/lib/*/frr/modules/dplane_fpm_nl.so
usr/lib/*/frr/modules/zebra_cumulus_mlag.so
sudo yum install git autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel flex epel-release c-ares-devel libcap-devel \
- elfutils-libelf-devel
+ elfutils-libelf-devel protobuf-c-devel
Install newer version of bison (CentOS 6 package source is too old) from CentOS
7:
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel bison flex pytest c-ares-devel \
python-devel python-sphinx libcap-devel \
- elfutils-libelf-devel libunwind-devel
+ elfutils-libelf-devel libunwind-devel protobuf-c-devel
.. include:: building-libunwind-note.rst
automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
groff pkgconfig json-c-devel pam-devel bison flex python2-pytest \
c-ares-devel python2-devel libcap-devel \
- elfutils-libelf-devel libunwind-devel
+ elfutils-libelf-devel libunwind-devel \
+ protobuf-c-devel
.. include:: building-libunwind-note.rst
sudo apt-get install git autoconf automake libtool make \
libreadline-dev texinfo libjson-c-dev pkg-config bison flex python3-pip \
libc-ares-dev python3-dev python3-sphinx build-essential \
- libsnmp-dev libcap-dev libelf-dev
+ libsnmp-dev libcap-dev libelf-dev libprotobuf-c-dev protobuf-c-compiler
Install newer pytest (>3.0) from pip
sudo apt-get install git autoconf automake libtool make \
libreadline-dev texinfo libjson-c-dev pkg-config bison flex \
libc-ares-dev python3-dev python3-pytest python3-sphinx build-essential \
- libsnmp-dev libcap-dev libelf-dev libunwind-dev
+ libsnmp-dev libcap-dev libelf-dev libunwind-dev \
+ libprotobuf-c-dev protobuf-c-compiler
.. include:: building-libunwind-note.rst
readline-devel texinfo net-snmp-devel groff pkgconfig json-c-devel \
pam-devel python3-pytest bison flex c-ares-devel python3-devel \
python3-sphinx perl-core patch libcap-devel \
- elfutils-libelf-devel libunwind-devel
+ elfutils-libelf-devel libunwind-devel protobuf-c-devel
.. include:: building-libunwind-note.rst
::
pkg install git autoconf automake libtool gmake json-c pkgconf \
- bison flex py36-pytest c-ares python3.6 py36-sphinx libunwind
+ bison flex py36-pytest c-ares python3.6 py36-sphinx libunwind \
+ protobuf-c
.. include:: building-libunwind-note.rst
.. code-block:: shell
pkg install git autoconf automake libtool gmake json-c pkgconf \
- bison flex py36-pytest c-ares python3.6 py36-sphinx texinfo libunwind
+ bison flex py36-pytest c-ares python3.6 py36-sphinx texinfo libunwind \
+ protobuf-c
.. include:: building-libunwind-note.rst
pkg install -y git autoconf automake libtool gmake \
pkgconf texinfo json-c bison flex py36-pytest c-ares \
- python3 py36-sphinx libexecinfo
+ python3 py36-sphinx libexecinfo protobuf-c
Make sure there is no /usr/bin/flex preinstalled (and use the newly
installed in /usr/local/bin): (FreeBSD frequently provides a older flex
::
sudo pkg_add git autoconf automake libtool gmake openssl \
- pkg-config json-c py36-test python36 py36-sphinx
+ pkg-config json-c py36-test python36 py36-sphinx \
+ protobuf-c
Install SSL Root Certificates (for git https access):
::
sudo pkgin install git autoconf automake libtool gmake openssl \
- pkg-config json-c python36 py36-test py36-sphinx
+ pkg-config json-c python36 py36-test py36-sphinx \
+ protobuf-c
Install SSL Root Certificates (for git https access):
pkg_add clang libcares python3
pkg_add git autoconf-2.69p2 automake-1.15.1 libtool bison
- pkg_add gmake json-c py-test py-sphinx libexecinfo
+ pkg_add gmake json-c py-test py-sphinx libexecinfo protobuf-c
Select Python2.7 as default (required for pytest)
readline-devel texinfo net-snmp-devel groff pkgconfig libjson-c-devel\
pam-devel python3-pytest bison flex c-ares-devel python3-devel\
python3-Sphinx perl patch libcap-devel libyang-devel \
- libelf-devel libunwind-devel
+ libelf-devel libunwind-devel protobuf-c
.. include:: building-libunwind-note.rst
.. include:: building-libyang.rst
+Protobuf
+^^^^^^^^
+
+.. code-block:: console
+
+ sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
Building & Installing FRR
-------------------------
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
libc-ares-dev python3-dev python-ipaddress python3-sphinx \
install-info build-essential libsnmp-dev perl libcap-dev \
- libelf-dev
+ libelf-dev libprotobuf-c-dev protobuf-c-compiler
.. include:: building-libyang.rst
+Protobuf
+^^^^^^^^
+
+.. code-block:: console
+
+ sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
Building & Installing FRR
-------------------------
FRR library helper formats
^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. frrfmt:: %pTH (struct thread *)
+.. frrfmt:: %pTH (struct event *)
- Print remaining time on timer thread. Interval-printing flag characters
+ Print remaining time on timer event. Interval-printing flag characters
listed above for ``%pTV`` can be added, e.g. ``%pTHtx``.
``NULL`` pointers are printed as ``-``.
-.. frrfmt:: %pTHD (struct thread *)
+.. frrfmt:: %pTHD (struct event *)
- Print debugging information for given thread. Sample output:
+ Print debugging information for given event. Sample output:
.. code-block:: none
#include "hook.h"
#include "module.h"
#include "libfrr.h"
- #include "thread.h"
+ #include "frrevent.h"
- static int module_late_init(struct thread_master *master)
+ static int module_late_init(struct event_loop *master)
{
/* Do initialization stuff here */
return 0;
Part of the controller code runs in FRR main thread and part runs in its own
FRR pthread started to isolate the main thread from the PCCs' event loop.
To communicate between the threads it uses FRR events, timers and
-`thread_execute` calls.
+`event_execute` calls.
PCC
datastructure that holds the state of an event loop in this system is called a
"threadmaster". Events scheduled on the event loop - what would today be called
an 'event' or 'task' in systems such as libevent - are called "threads" and the
-datastructure for them is ``struct thread``. To add to the confusion, these
+datastructure for them is ``struct event``. To add to the confusion, these
"threads" have various types, one of which is "event". To hopefully avoid some
of this confusion, this document refers to these "threads" as a 'task' except
where the datastructures are explicitly named. When they are explicitly named,
interplay between the event system and kernel threads.
The core event system is implemented in :file:`lib/thread.[ch]`. The primary
-structure is ``struct thread_master``, hereafter referred to as a
+structure is ``struct event_loop``, hereafter referred to as a
``threadmaster``. A ``threadmaster`` is a global state object, or context, that
holds all the tasks currently pending execution as well as statistics on tasks
that have already executed. The event system is driven by adding tasks to this
fetch each task and execute it.
These tasks have various types corresponding to their general action. The types
-are given by integer macros in :file:`thread.h` and are:
+are given by integer macros in :file:`event.h` and are:
``THREAD_READ``
Task which waits for a file descriptor to become ready for reading and then
Type used internally for tasks on the ready queue.
``THREAD_UNUSED``
- Type used internally for ``struct thread`` objects that aren't being used.
- The event system pools ``struct thread`` to avoid heap allocations; this is
+ Type used internally for ``struct event`` objects that aren't being used.
+ The event system pools ``struct event`` to avoid heap allocations; this is
the type they have when they're in the pool.
``THREAD_EXECUTE``
::
- thread_add_read(struct thread_master *master, int (*handler)(struct thread *), void *arg, int fd, struct thread **ref);
+ event_add_read(struct event_loop *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);
-The ``struct thread`` is then created and added to the appropriate internal
+The ``struct event`` is then created and added to the appropriate internal
datastructure within the ``threadmaster``. Note that the ``READ`` and
``WRITE`` tasks are independent - a ``READ`` task only tests for
readability, for example.
startup the first task added is an I/O task for VTYSH as well as any network
sockets needed for peerings or IPC.
-To retrieve the next task to run the program calls ``thread_fetch()``.
-``thread_fetch()`` internally computes which task to execute next based on
+To retrieve the next task to run the program calls ``event_fetch()``.
+``event_fetch()`` internally computes which task to execute next based on
rudimentary priority logic. Events (type ``THREAD_EVENT``) execute with the
highest priority, followed by expired timers and finally I/O tasks (type
``THREAD_READ`` and ``THREAD_WRITE``). When scheduling a task a function and an
-arbitrary argument are provided. The task returned from ``thread_fetch()`` is
-then executed with ``thread_call()``.
+arbitrary argument are provided. The task returned from ``event_fetch()`` is
+then executed with ``event_call()``.
The following diagram illustrates a simplified version of this infrastructure.
Mapping the general names used in the figure to specific FRR functions:
-- ``task`` is ``struct thread *``
-- ``fetch`` is ``thread_fetch()``
-- ``exec()`` is ``thread_call``
-- ``cancel()`` is ``thread_cancel()``
-- ``schedule()`` is any of the various task-specific ``thread_add_*`` functions
+- ``task`` is ``struct event *``
+- ``fetch`` is ``event_fetch()``
+- ``exec()`` is ``event_call``
+- ``cancel()`` is ``event_cancel()``
+- ``schedule()`` is any of the various task-specific ``event_add_*`` functions
Adding tasks is done with various task-specific function-like macros. These
macros wrap underlying functions in :file:`thread.c` to provide additional
information added at compile time, such as the line number the task was
scheduled from, that can be accessed at runtime for debugging, logging and
informational purposes. Each task type has its own specific scheduling function
-that follow the naming convention ``thread_add_<type>``; see :file:`thread.h`
+that follow the naming convention ``event_add_<type>``; see :file:`event.h`
for details.
There are some gotchas to keep in mind:
communication and boils down to a slightly more complicated method of message
passing, where the messages are the regular task events as used in the
event-driven model. The only difference is thread cancellation, which requires
-calling ``thread_cancel_async()`` instead of ``thread_cancel`` to cancel a task
+calling ``event_cancel_async()`` instead of ``event_cancel`` to cancel a task
currently scheduled on a ``threadmaster`` belonging to a different pthread.
This is necessary to avoid race conditions in the specific case where one
pthread wants to guarantee that a task on another pthread is cancelled before
The ``thread_master`` code currently always holds RCU everywhere, except
while doing the actual ``poll()`` syscall. This is both an optimization as
well as an "easement" into getting RCU going. The current implementation
- contract is that any ``struct thread *`` callback is called with a RCU
+ contract is that any ``struct event *`` callback is called with a RCU
holding depth of 1, and that this is owned by the thread so it may (should)
drop and reacquire it when doing some longer-running work.
frr_libfrr:frr_pthread_stop (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:frr_pthread_run (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:thread_call (loglevel: TRACE_INFO (6)) (type: tracepoint)
- frr_libfrr:thread_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
- frr_libfrr:thread_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
+ frr_libfrr:event_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
+ frr_libfrr:event_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_write (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_read (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_event (loglevel: TRACE_INFO (6)) (type: tracepoint)
Display a usage message on standard output and exit.
+.. option:: -t, --timestamp
+
+ Print a timestamp before going to shell or reading the configuration file.
+
+.. option:: --no-fork
+
+ When used in conjunction with ``-b``, prevents vtysh from forking children to handle configuring each target daemon.
+
+
ENVIRONMENT VARIABLES
=====================
VTYSH_PAGER
Allow using IPv4 reserved (Class E) IP ranges for daemons. E.g.: setting
IPv4 addresses for interfaces or allowing reserved ranges in BGP next-hops.
+ If you need multiple FRR instances (or FRR + any other daemon) running in a
+ single router and peering via 127.0.0.0/8, it's also possible to use this
+ knob if turned on.
+
Default: off.
.. _sample-config-file:
vrrp
bmp
watchfrr
+ mgmtd
########
Appendix
--- /dev/null
+.. _mgmtd:
+
+*************************
+MGMTd (Management Daemon)
+*************************
+
+The FRR Management Daemon (from now on referred to as MGMTd) is a new
+centralized entity representing the FRR Management Plane which can take
+management requests from any kind of UI/Frontend entity (e.g. CLI, Netconf,
+Restconf, Grpc etc.) over a new unified and common Frontend interface and
+can help maintain configurational data or retrieve operational data from
+any number of FRR managed entities/components that have been integrated
+with the new FRR Centralised Management Framework.
+
+For organizing the management data to be owned by the FRR Management plane,
+management data is stored in YANG in compliance with a pre-defined set
+of YANG based schema. Data shall also be stored/retrieved in YANG format only.
+
+The MGMTd also acts as a separate computational entity for offloading much
+of the management related computational overload involved in maintaining of
+management data and processing of management requests, from individual
+component daemons (which can otherwise be a significant burden on the individual
+components, affecting performance of its other functionalities).
+
+Lastly, the MGMTd works in-tandem with one (or more) MGMT Frontend
+Clients and a bunch of MGMT Backend Clients to realize the entirety
+of the FRR Management plane. Some of the advantages of this new framework
+are:
+
+ 1. Consolidation and management of all Management data by a single entity.
+ 2. Better control over configuration validation, commit and rollback.
+ 3. Faster collection of configuration data (without needing to involve
+ individual component daemons).
+ 4. Offload computational burden of YANG data parsing and validations
+ of new configuration data being provisioned away from individual
+ component daemons
+ 5. Improve performance of individual component daemons while loading
+ huge configuration or retrieving huge operational dataset.
+
+The new FRR Management Daemon consists of the following sub-components:
+ - MGMT Frontend Interface
+ - MGMT Backend Interface
+ - MGMT Transaction Engine
+
+.. _mgmt_fe:
+
+MGMT Frontend Interface
+=======================
+
+The MGMT Frontend Interface is a bunch of message-based APIs that lets
+any UI/Frontend client interact with the MGMT daemon to request a
+set of management operations on a specific datastore/database.
+Following is a list of databases/datastores supported by the MGMT
+Frontend Interface and MGMTd:
+
+ - Candidate Database:
+
+ - Consists of configuration data items only.
+ - Data can be edited anytime using SET_CONFIG API.
+ - Data can be retrieved anytime using GET_CONFIG/GET_DATA API.
+
+ - Running Database:
+
+ - Consists of configuration data items only.
+ - Data cannot be edited using SET_CONFIG API.
+ - Data can only be modified using COMMIT_CONFIG API after which un-committed
+ data from Candidate database will be first validated and applied to
+ individual Backend component(s). Only on successful validation and apply on
+ all individual components will the new data be copied over to the Running
+ database.
+ - Data can be retrieved anytime using GET_CONFIG/GET_DATA API.
+
+ - Startup Database:
+
+ - Consists of configuration data items only.
+ - This is a copy of Running database that is stored in persistent
+ storage and is used to load configurations on Running database during
+ MGMT daemon startup.
+ - Data cannot be edited/retrieved directly via Frontend interface.
+
+ - Operational Database:
+
+ - Consists of non-configurational data items.
+ - Data is not stored on MGMT daemon. Rather it will need to be fetched
+ in real-time from the corresponding Backend component (if present).
+ - Data can be retrieved anytime using GET_DATA API.
+
+Frontend Clients connected to MGMTd via Frontend Interface can themselves have
+multiple connections from one (or more) of its own remote clients. The MGMT
+Frontend Interface supports representing each of the remote clients for a given
+Frontend client (e.g. Netconf clients on a single Netconf server) as individual
+Frontend Client Sessions. So a single connection from a single Frontend Client
+can create more than one Frontend Client sessions.
+
+Following are some of the management operations supported:
+ - INIT_SESSION/CLOSE_SESSION: Create/Destroy a session. Rest of all the
+ operations are supported only in the context of a specific session.
+ - LOCK_DB/UNLOCK_DB: Lock/Unlock Management datastores/databases.
+ - GET_CONFIG/GET_DATA: Retrieve configurational/operational data from a
+ specific datastore/database.
+ - SET_CONFIG/DELETE_CONFIG: Add/Modify/Delete specific data in a specific
+ datastore/database.
+ - COMMIT_CONFIG: Validate and/or apply the uncommitted set of configurations
+ from one configuration database to another.
+ - Currently committing configurations from Candidate to Running database
+ is only allowed, and not vice versa.
+
+The exact set of message-based APIs are represented as Google Protobuf
+messages and can be found in the following file distributed with FRR codebase.
+
+.. code-block:: frr
+
+ lib/mgmt.proto
+
+The MGMT daemon implements a MGMT Frontend Server that opens a UNIX
+socket-based IPC channel on the following path to listen for incoming
+connections from all possible Frontend clients:
+
+.. code-block:: frr
+
+ /var/run/frr/mgmtd_fe.sock
+
+Each connection received from a Frontend client is managed and tracked
+as a MGMT Frontend adapter by the MGMT Frontend Adapter sub-component
+implemented by MGMTd.
+
+To facilitate faster development/integration of Frontend clients with
+MGMT Frontend Interface, a C-based library has been developed. The API
+specification of this library can be found at:
+
+.. code-block:: frr
+
+ lib/mgmt_fe_client.h
+
+Following is a list of message types supported on the MGMT Frontend Interface:
+ - SESSION_REQ<Client-Connection-Id, Destroy>
+ - SESSION_REPLY<Client-Connection-Id, Destroy, Session-Id>
+ - LOCK_DB_REQ <Session-Id, Database-Id>
+ - LOCK_DB_REPLY <Session-Id, Database-Id>
+ - UNLOCK_DB_REQ <Session-Id, Database-Id>
+ - UNLOCK_DB_REPLY <Session-Id, Database-Id>
+ - GET_CONFIG_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - GET_CONFIG_REPLY <Session-Id, Database-Id, Base-Yang-Xpath, Yang-Data-Set>
+ - SET_CONFIG_REQ <Session-Id, Database-Id, Base-Yang-Xpath, Delete, ...>
+ - SET_CONFIG_REPLY <Session-Id, Database-id, Base-Yang-Xpath, ..., Status>
+ - COMMIT_CONFIG_REQ <Session-Id, Source-Db-Id, Dest-Db-Id>
+ - COMMIT_CONFIG_REPLY <Session-Id, Source-Db-id, Dest-Db-Id, Status>
+ - GET_DATA_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - GET_DATA_REPLY <Session-Id, Database-id, Base-Yang-Xpath, Yang-Data-Set>
+ - REGISTER_NOTIFY_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - DATA_NOTIFY_REQ <Database-Id, Base-Yang-Xpath, Yang-Data-Set>
+
+Please refer to the MGMT Frontend Client Developers Reference and Guide
+(coming soon) for more details.
+
+MGMTD Backend Interface
+=======================
+The MGMT Backend Interface is a bunch of message-based APIs that can be
+used by individual component daemons like BGPd, Staticd, Zebra to connect
+with MGMTd and utilize the new FRR Management Framework to let any Frontend
+clients to retrieve any operational data or manipulate any configuration data
+owned by the individual daemon component.
+
+Like the MGMT Frontend Interface, the MGMT Backend Interface is also
+comprised of the following:
+
+ - MGMT Backend Server (running on MGMT daemon)
+ - MGMT Backend Adapter (running on MGMT daemon)
+ - MGMT Backend client (running on Backend component daemons)
+
+The MGMT Backend Client and MGMT Backend Adapter sub-component communicates
+using a specific set of message-based APIs.
+
+The exact set of message-based APIs are represented as Google Protobuf
+messages and can be found in the following file distributed with FRR codebase.
+
+.. code-block:: frr
+
+ lib/mgmt.proto
+
+The MGMT daemon implements a MGMT Backend Server that opens a UNIX
+socket-based IPC channel on the following path to listen for incoming
+connections from all possible Backend clients:
+
+.. code-block:: frr
+
+ /var/run/frr/mgmtd_be.sock
+
+Each connection received from a Backend client is managed and tracked
+as a MGMT Backend adapter by the MGMT Backend Adapter sub-component
+implemented by MGMTd.
+
+To facilitate faster development/integration of Backend clients with
+MGMTd, a C-based library has been developed. The API specification
+of this library can be found at:
+
+.. code-block:: frr
+
+ lib/mgmt_be_client.h
+
+Following is a list of message types supported on the MGMT Backend Interface:
+
+ - SUBSCRIBE_REQ <Req-Id, Base-Yang-Xpath, Filter-Type>
+ - SUBSCRIBE_REPLY <Req-Id, Status>
+ - TXN_REQ <Txn-Id, Create>
+ - TXN_REPLY <Txn-Id, Status>
+ - CREATE_CFGDATA_REQ <Txn-Id, Req-Id, Batch-Id, ConfigDataContents>
+ - CREATE_CFGDATA_ERROR <Txn-Id, Req-Id, Batch-Id, Status>
+ - VALIDATE_CFGDATA_REQ <Txn-Id, Batch-Id>
+ - VALIDATE_CFGDATA_REPLY <Txn-Id, Batch-Id, Status, ErrorInfo>
+ - APPLY_CFGDATA_REQ <Txn-Id, Batch-Id>
+ - APPLY_CFGDATA_REPLY <Txn-Id, Batch-Id, Status, ErrorInfo>
+ - GET_OPERDATA_REQ <Txn-Id, Base-Yang-Xpath, Filter-Type>
+ - GET_OPERDATA_REPLY <Txn-Id, OperDataContents>
+
+Please refer to the MGMT Backend Client Developers Reference and Guide
+(coming soon) for more details.
+
+MGMTD Transaction Engine
+========================
+
+The MGMT Transaction sub-component is the main brain of the MGMT daemon that
+takes management requests from one (or more) Frontend Clients, translates
+them into transactions and drives them to completion in co-ordination with
+one (or more) Backend client daemons involved in the request.
+
+A transaction can be seen as a set of management procedures executed over
+the Backend Interface with one (or more) individual Backend component
+daemons, as a result of some management request initiated from a specific
+Frontend client session. These group of operations on the Backend Interface
+with one (or more) individual components involved should be executed without
+taking any further management requests from other Frontend client sessions.
+To maintain this kind of atomic behavior a lock needs to be acquired
+(sometimes implicitly if not explicitly) by the corresponding Frontend client
+session, on the various datastores/databases involved in the management request
+being executed. The same datastores/databases need to be unlocked when all
+the procedures have been executed and the transaction is being closed.
+
+Following are some of the transaction types supported by MGMT:
+
+ - Configuration Transactions
+
+ - Used to execute management operations like SET_CONFIG and COMMIT_CONFIG
+ that involve writing/over-writing the contents of Candidate and Running
+ databases.
+ - One (and only one) can be created and be in-progress at any given time.
+ - Once initiated by a specific Frontend Client session and is still
+ in-progress, all subsequent SET_CONFIG and COMMIT_CONFIG operations
+ from other Frontend Client sessions will be rejected and responded
+ with failure.
+ - Requires acquiring write-lock on Candidate (and later Running) databases.
+
+ - Show Transactions
+
+ - Used to execute management operations like GET_CONFIG and GET_DATA
+ that involve only reading the contents of Candidate and Running
+ databases (and sometimes real-time retrieval of operational data
+ from individual component daemons).
+ - Multiple instance of this transaction type can be created and be
+ in-progress at any given time.
+ - However, even when a configuration transaction is currently in-progress,
+ a show transaction can still be initiated by any Frontend Client session.
+ - Requires acquiring read-lock on Candidate and/or Running databases.
+ - NOTE: Currently GET_DATA on Operational database is NOT supported. To
+ be added in a future time soon.
+
+MGMTD Configuration Rollback and Commit History
+===============================================
+
+The MGMT daemon maintains up to 10 last configuration commit buffers
+and can rollback the contents of the Running Database to any of the
+commit-ids maintained in the commit buffers.
+
+Once the number of commit buffers exceeds 10, the oldest commit
+buffer is deleted to make space for the latest commit. Also on
+rollback to a specific commit-id, buffer of all the later commits
+are deleted from commit record.
+
+Configuration rollback is only allowed via VTYSH shell as of today
+and is not possible through the MGMT Frontend interface.
+
+MGMT Configuration commands
+===========================
+
+.. clicmd:: mgmt set-config XPATH VALUE
+
+ This command uses a SET_CONFIG request over the MGMT Frontend Interface
+ for the specified xpath with specific value. This command is used for
+ testing purpose only. But can be used to set configuration data from CLI
+ using SET_CONFIG operations.
+
+.. clicmd:: mgmt delete-config XPATH
+
+ This command uses a SET_CONFIG request (with delete option) over the
+ MGMT Frontend Interface to delete the YANG data node at the given
+ xpath unless it is a key-leaf node (in which case it is not deleted).
+
+.. clicmd:: mgmt load-config FILE <merge|replace>
+
+ This command loads configuration in JSON format from the filepath specified,
+ and merges or replaces the Candidate DB as per the option specified.
+
+.. clicmd:: mgmt save-config <candidate|running> FILE
+
+ This command dumps the DB specified in the db-name into the file in JSON
+ format. This command is not supported for the Operational DB.
+
+.. clicmd:: mgmt commit abort
+
+ This command will abort any configuration present on the Candidate but not
+ been applied to the Running DB.
+
+.. clicmd:: mgmt commit apply
+
+ This command commits any uncommitted changes in the Candidate DB to the
+ Running DB. It also dumps a copy of the tree in JSON format into
+ frr_startup.json.
+
+.. clicmd:: mgmt commit check
+
+ This command validates the configuration but does not apply them to the
+ Running DB.
+
+.. clicmd:: mgmt rollback commit-id WORD
+
+ This command rolls back the Running Database contents to the state
+ corresponding to the commit-id specified.
+
+.. clicmd:: mgmt rollback last WORD
+
+ This command rolls back the last specified number of recent commits.
+
+
+MGMT Show commands
+==================
+
+.. clicmd:: show mgmt backend-adapter all
+
+ This command shows the backend adapter information and the clients/daemons
+ connected to the adapters.
+
+.. clicmd:: show mgmt backend-yang-xpath-registry
+
+ This command shows which Backend adapters are registered for which YANG
+ data subtree(s).
+
+.. clicmd:: show mgmt frontend-adapter all [detail]
+
+ This command shows the frontend adapter information and the clients
+ connected to the adapters.
+
+.. clicmd:: show mgmt transaction all
+
+ Shows the list of transaction and bunch of information about the transaction.
+
+.. clicmd:: show mgmt get-config [candidate|running] XPATH
+
+ This command uses the GET_CONFIG operation over the MGMT Frontend interface and
+ returns the xpaths and values of the nodes of the subtree pointed by the <xpath>.
+
+.. clicmd:: show mgmt get-data [candidate|operation|running] XPATH
+
+ This command uses the GET_DATA operation over the MGMT Frontend interface and
+ returns the xpaths and values of the nodes of the subtree pointed by the <xpath>.
+ Currently supported values are 'candidate' and 'running' only
+ ('operational' shall be supported in future soon).
+
+.. clicmd:: show mgmt database-contents [candidate|operation|running] [xpath WORD] [file WORD] json|xml
+
+ This command dumps the subtree pointed by the xpath in JSON or XML format. If filepath is
+ not present then the tree will be printed on the shell.
+
+.. clicmd:: show mgmt commit-history
+
+ This command dumps details of up to the last 10 commits handled by MGMTd.
Subtract the BGP local preference from an existing `local_pref`.
-.. clicmd:: set distance DISTANCE
+.. clicmd:: set distance (1-255)
- Set the Administrative distance to DISTANCE to use for the route.
+ Set the Administrative distance to use for the route.
This is only locally significant and will not be dispersed to peers.
.. clicmd:: set weight WEIGHT
doc/user/flowspec.rst \
doc/user/watchfrr.rst \
doc/user/wecmp_linkbw.rst \
+ doc/user/mgmtd.rst \
# end
EXTRA_DIST += \
gzip \
py-pip \
rtrlib \
+ protobuf-c-dev \
&& pip install pytest
RUN mkdir -p /pkgs/apk
COPY . /src
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel bison flex pytest c-ares-devel \
python3-devel python3-sphinx libcap-devel systemd-devel \
+ protobuf-c-devel \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-00146/CentOS-7-x86_64-Packages/librtr-0.8.0-1.el7.x86_64.rpm \
automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \
c-ares-devel python3-devel python3-sphinx libcap-devel platform-python-devel \
+ protobuf-c-devel \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-00146/CentOS-7-x86_64-Packages/librtr-0.8.0-1.el7.x86_64.rpm \
RUN apt-get update && \
apt-get install -y libpcre3-dev apt-transport-https ca-certificates curl wget logrotate \
libc-ares2 libjson-c3 vim procps libreadline7 gnupg2 lsb-release apt-utils \
- tini && rm -rf /var/lib/apt/lists/*
+ libprotobuf-c-dev protobuf-c-compiler tini && rm -rf /var/lib/apt/lists/*
RUN curl -s https://deb.frrouting.org/frr/keys.asc | apt-key add -
RUN echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) frr-stable | tee -a /etc/apt/sources.list.d/frr.list
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl libcap-dev \
- libelf-dev \
+ libelf-dev libprotobuf-c-dev protobuf-c-compiler \
sudo gdb iputils-ping time \
python-pip net-tools iproute2 && \
python3 -m pip install wheel && \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl \
- libcap-dev python2 libelf-dev \
+ libcap-dev python2 libelf-dev libprotobuf-c-dev protobuf-c-compiler \
sudo gdb curl iputils-ping time \
lua5.3 liblua5.3-dev \
net-tools iproute2 && \
#include <zebra.h>
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "command.h"
#include "stream.h"
vty_out(vty, "%-3u %-17pI4 %-21s", 0, &nbr->src, IF_NAME(nbr->ei));
if (nbr->t_holddown)
vty_out(vty, "%-7lu",
- thread_timer_remain_second(nbr->t_holddown));
+ event_timer_remain_second(nbr->t_holddown));
else
vty_out(vty, "- ");
vty_out(vty, "%-8u %-6u %-5u", 0, 0, EIGRP_PACKET_RETRANS_TIME);
#include "command.h"
#include "prefix.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "log.h"
#include "stream.h"
// TODO: check Graceful restart after 10sec
/* cancel GR scheduled */
- thread_cancel(&(e->t_distribute));
+ event_cancel(&(e->t_distribute));
/* schedule Graceful restart for whole process in 10sec */
- thread_add_timer(master, eigrp_distribute_timer_process, e,
- (10), &e->t_distribute);
+ event_add_timer(master, eigrp_distribute_timer_process, e, (10),
+ &e->t_distribute);
return;
}
// TODO: check Graceful restart after 10sec
/* Cancel GR scheduled */
- thread_cancel(&(ei->t_distribute));
+ event_cancel(&(ei->t_distribute));
/* schedule Graceful restart for interface in 10sec */
- thread_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
- &ei->t_distribute);
+ event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
+ &ei->t_distribute);
}
/*
* Called when 10sec waiting time expire and
* executes Graceful restart for whole process
*/
-void eigrp_distribute_timer_process(struct thread *thread)
+void eigrp_distribute_timer_process(struct event *thread)
{
struct eigrp *eigrp;
- eigrp = THREAD_ARG(thread);
+ eigrp = EVENT_ARG(thread);
/* execute GR for whole process */
eigrp_update_send_process_GR(eigrp, EIGRP_GR_FILTER, NULL);
* Called when 10sec waiting time expire and
* executes Graceful restart for interface
*/
-void eigrp_distribute_timer_interface(struct thread *thread)
+void eigrp_distribute_timer_interface(struct event *thread)
{
struct eigrp_interface *ei;
- ei = THREAD_ARG(thread);
+ ei = EVENT_ARG(thread);
ei->t_distribute = NULL;
/* execute GR for interface */
extern void eigrp_distribute_update_interface(struct interface *ifp);
extern void eigrp_distribute_update_all(struct prefix_list *plist);
extern void eigrp_distribute_update_all_wrapper(struct access_list *alist);
-extern void eigrp_distribute_timer_process(struct thread *thread);
-extern void eigrp_distribute_timer_interface(struct thread *thread);
+extern void eigrp_distribute_timer_process(struct event *thread);
+extern void eigrp_distribute_timer_interface(struct event *thread);
#endif /* EIGRPD_EIGRP_FILTER_H_ */
*/
#include <zebra.h>
-#include <thread.h>
+#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "memory.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
* Sends hello packet via multicast for all interfaces eigrp
* is configured for
*/
-void eigrp_hello_timer(struct thread *thread)
+void eigrp_hello_timer(struct event *thread)
{
struct eigrp_interface *ei;
- ei = THREAD_ARG(thread);
+ ei = EVENT_ARG(thread);
if (IS_DEBUG_EIGRP(0, TIMERS))
zlog_debug("Start Hello Timer (%s) Expire [%u]", IF_NAME(ei),
eigrp_hello_send(ei, EIGRP_HELLO_NORMAL, NULL);
/* Hello timer set. */
- thread_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
- &ei->t_hello);
+ event_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
+ &ei->t_hello);
}
/**
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
if (ei->eigrp->t_write == NULL) {
if (flags & EIGRP_HELLO_GRACEFUL_SHUTDOWN) {
- thread_execute(master, eigrp_write, ei->eigrp,
- ei->eigrp->fd);
+ event_execute(master, eigrp_write, ei->eigrp,
+ ei->eigrp->fd);
} else {
- thread_add_write(master, eigrp_write, ei->eigrp,
- ei->eigrp->fd,
- &ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, ei->eigrp,
+ ei->eigrp->fd,
+ &ei->eigrp->t_write);
}
}
}
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
/* Set multicast memberships appropriately for new state. */
eigrp_if_set_multicast(ei);
- thread_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
+ event_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
/*Prepare metrics*/
metric.bandwidth = eigrp_bandwidth_to_scaled(ei->params.bandwidth);
return 0;
/* Shutdown packet reception and sending */
- THREAD_OFF(ei->t_hello);
+ EVENT_OFF(ei->t_hello);
eigrp_if_stream_unset(ei);
if (ei->on_write_q) {
listnode_delete(eigrp->oi_write_q, ei);
if (list_isempty(eigrp->oi_write_q))
- thread_cancel(&(eigrp->t_write));
+ event_cancel(&(eigrp->t_write));
ei->on_write_q = 0;
}
}
struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
- thread_cancel(&ei->t_hello);
+ event_cancel(&ei->t_hello);
eigrp_hello_send(ei, EIGRP_HELLO_GRACEFUL_SHUTDOWN, NULL);
}
/* FSM macros*/
#define EIGRP_FSM_EVENT_SCHEDULE(I, E) \
- thread_add_event(master, eigrp_fsm_event, (I), (E))
+ event_add_event(master, eigrp_fsm_event, (I), (E))
#endif /* _ZEBRA_EIGRP_MACROS_H_ */
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
struct option longopts[] = {{0}};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* Forward declaration of daemon info structure. */
static struct frr_daemon_info eigrpd_di;
#include "prefix.h"
#include "memory.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "table.h"
#include "log.h"
eigrp_topology_neighbor_down(nbr->ei->eigrp, nbr);
/* Cancel all events. */ /* Thread lookup cost would be negligible. */
- thread_cancel_event(master, nbr);
+ event_cancel_event(master, nbr);
eigrp_fifo_free(nbr->multicast_queue);
eigrp_fifo_free(nbr->retrans_queue);
- THREAD_OFF(nbr->t_holddown);
+ EVENT_OFF(nbr->t_holddown);
if (nbr->ei)
listnode_delete(nbr->ei->nbrs, nbr);
XFREE(MTYPE_EIGRP_NEIGHBOR, nbr);
}
-void holddown_timer_expired(struct thread *thread)
+void holddown_timer_expired(struct event *thread)
{
- struct eigrp_neighbor *nbr = THREAD_ARG(thread);
+ struct eigrp_neighbor *nbr = EVENT_ARG(thread);
struct eigrp *eigrp = nbr->ei->eigrp;
zlog_info("Neighbor %pI4 (%s) is down: holding time expired", &nbr->src,
// hold time..
nbr->v_holddown = EIGRP_HOLD_INTERVAL_DEFAULT;
- THREAD_OFF(nbr->t_holddown);
+ EVENT_OFF(nbr->t_holddown);
/* out with the old */
if (nbr->multicast_queue)
switch (nbr->state) {
case EIGRP_NEIGHBOR_DOWN: {
/*Start Hold Down Timer for neighbor*/
- // THREAD_OFF(nbr->t_holddown);
- // THREAD_TIMER_ON(master, nbr->t_holddown,
+ // EVENT_OFF(nbr->t_holddown);
+ // EVENT_TIMER_ON(master, nbr->t_holddown,
// holddown_timer_expired,
// nbr, nbr->v_holddown);
break;
}
case EIGRP_NEIGHBOR_PENDING: {
/*Reset Hold Down Timer for neighbor*/
- THREAD_OFF(nbr->t_holddown);
- thread_add_timer(master, holddown_timer_expired, nbr,
- nbr->v_holddown, &nbr->t_holddown);
+ EVENT_OFF(nbr->t_holddown);
+ event_add_timer(master, holddown_timer_expired, nbr,
+ nbr->v_holddown, &nbr->t_holddown);
break;
}
case EIGRP_NEIGHBOR_UP: {
/*Reset Hold Down Timer for neighbor*/
- THREAD_OFF(nbr->t_holddown);
- thread_add_timer(master, holddown_timer_expired, nbr,
- nbr->v_holddown, &nbr->t_holddown);
+ EVENT_OFF(nbr->t_holddown);
+ event_add_timer(master, holddown_timer_expired, nbr,
+ nbr->v_holddown, &nbr->t_holddown);
break;
}
}
extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei);
extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
-extern void holddown_timer_expired(struct thread *thread);
+extern void holddown_timer_expired(struct event *thread);
extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
struct TLV_Parameter_Type *tlv);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
-extern void eigrp_hello_timer(struct thread *thread);
+extern void eigrp_hello_timer(struct event *thread);
extern void eigrp_if_update(struct interface *);
extern int eigrp_if_add_allspfrouters(struct eigrp *, struct prefix *,
unsigned int);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "vty.h"
return 1;
}
-void eigrp_write(struct thread *thread)
+void eigrp_write(struct event *thread)
{
- struct eigrp *eigrp = THREAD_ARG(thread);
+ struct eigrp *eigrp = EVENT_ARG(thread);
struct eigrp_header *eigrph;
struct eigrp_interface *ei;
struct eigrp_packet *ep;
/* If packets still remain in queue, call write thread. */
if (!list_isempty(eigrp->oi_write_q)) {
- thread_add_write(master, eigrp_write, eigrp, eigrp->fd,
- &eigrp->t_write);
+ event_add_write(master, eigrp_write, eigrp, eigrp->fd,
+ &eigrp->t_write);
}
}
/* Starting point of packet process function. */
-void eigrp_read(struct thread *thread)
+void eigrp_read(struct event *thread)
{
int ret;
struct stream *ibuf;
uint16_t length = 0;
/* first of all get interface pointer. */
- eigrp = THREAD_ARG(thread);
+ eigrp = EVENT_ARG(thread);
/* prepare for next packet. */
- thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+ event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
eigrp_fifo_push(nbr->ei->obuf, duplicate);
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
- EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+ EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/*Increment sequence number counter*/
nbr->ei->eigrp->sequence_number++;
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
if (ep->s)
stream_free(ep->s);
- THREAD_OFF(ep->t_retrans_timer);
+ EVENT_OFF(ep->t_retrans_timer);
XFREE(MTYPE_EIGRP_PACKET, ep);
}
return 0;
}
-void eigrp_unack_packet_retrans(struct thread *thread)
+void eigrp_unack_packet_retrans(struct event *thread)
{
struct eigrp_neighbor *nbr;
- nbr = (struct eigrp_neighbor *)THREAD_ARG(thread);
+ nbr = (struct eigrp_neighbor *)EVENT_ARG(thread);
struct eigrp_packet *ep;
ep = eigrp_fifo_next(nbr->retrans_queue);
}
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
- EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+ EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
-void eigrp_unack_multicast_packet_retrans(struct thread *thread)
+void eigrp_unack_multicast_packet_retrans(struct event *thread)
{
struct eigrp_neighbor *nbr;
- nbr = (struct eigrp_neighbor *)THREAD_ARG(thread);
+ nbr = (struct eigrp_neighbor *)EVENT_ARG(thread);
struct eigrp_packet *ep;
ep = eigrp_fifo_next(nbr->multicast_queue);
}
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_multicast_packet_retrans,
- nbr, EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_multicast_packet_retrans,
+ nbr, EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
#define _ZEBRA_EIGRP_PACKET_H
/*Prototypes*/
-extern void eigrp_read(struct thread *thread);
-extern void eigrp_write(struct thread *thread);
+extern void eigrp_read(struct event *thread);
+extern void eigrp_write(struct event *thread);
extern struct eigrp_packet *eigrp_packet_new(size_t size,
struct eigrp_neighbor *nbr);
extern uint16_t eigrp_add_authTLV_SHA256_to_stream(struct stream *s,
struct eigrp_interface *ei);
-extern void eigrp_unack_packet_retrans(struct thread *thread);
-extern void eigrp_unack_multicast_packet_retrans(struct thread *thread);
+extern void eigrp_unack_packet_retrans(struct event *thread);
+extern void eigrp_unack_multicast_packet_retrans(struct event *thread);
/*
* untill there is reason to have their own header, these externs are found in
extern void eigrp_hello_receive(struct eigrp *eigrp, struct ip *iph,
struct eigrp_header *eigrph, struct stream *s,
struct eigrp_interface *ei, int size);
-extern void eigrp_hello_timer(struct thread *thread);
+extern void eigrp_hello_timer(struct event *thread);
/*
* These externs are found in eigrp_update.c
struct eigrp_interface *exception);
extern void eigrp_update_send_init(struct eigrp_neighbor *nbr);
extern void eigrp_update_send_EOT(struct eigrp_neighbor *nbr);
-extern void eigrp_update_send_GR_thread(struct thread *thread);
+extern void eigrp_update_send_GR_thread(struct event *thread);
extern void eigrp_update_send_GR(struct eigrp_neighbor *nbr,
enum GR_type gr_type, struct vty *vty);
extern void eigrp_update_send_interface_GR(struct eigrp_interface *ei,
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
struct list *oi_write_q;
/*Threads*/
- struct thread *t_write;
- struct thread *t_read;
- struct thread *t_distribute; /* timer for distribute list */
+ struct event *t_write;
+ struct event *t_read;
+ struct event *t_distribute; /* timer for distribute list */
struct route_table *networks; /* EIGRP config networks. */
struct list *nbrs; /* EIGRP Neighbor List */
/* Threads. */
- struct thread *t_hello; /* timer */
- struct thread *t_distribute; /* timer for distribute list */
+ struct event *t_hello; /* timer */
+ struct event *t_distribute; /* timer for distribute list */
int on_write_q;
uint16_t v_holddown;
/* Threads. */
- struct thread *t_holddown;
- struct thread *t_nbr_send_gr; /* thread for sending multiple GR packet
+ struct event *t_holddown;
+ struct event *t_nbr_send_gr; /* thread for sending multiple GR packet
chunks */
struct eigrp_fifo *retrans_queue;
struct in_addr dst;
/*Packet retransmission thread*/
- struct thread *t_retrans_timer;
+ struct event *t_retrans_timer;
/*Packet retransmission counter*/
uint8_t retrans_counter;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
*
* Uses nbr_gr_packet_type and t_nbr_send_gr from neighbor.
*/
-void eigrp_update_send_GR_thread(struct thread *thread)
+void eigrp_update_send_GR_thread(struct event *thread)
{
struct eigrp_neighbor *nbr;
/* get argument from thread */
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
/* remove this thread pointer */
/* if there is packet waiting in queue,
* schedule this thread again with small delay */
if (nbr->retrans_queue->count > 0) {
- thread_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
- 10, &nbr->t_nbr_send_gr);
+ event_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
+ 10, &nbr->t_nbr_send_gr);
return;
}
/* if it wasn't last chunk, schedule this thread again */
if (nbr->nbr_gr_packet_type != EIGRP_PACKET_PART_LAST) {
- thread_execute(master, eigrp_update_send_GR_thread, nbr, 0);
+ event_execute(master, eigrp_update_send_GR_thread, nbr, 0);
}
}
/* indicate, that this is first GR Update packet chunk */
nbr->nbr_gr_packet_type = EIGRP_PACKET_PART_FIRST;
/* execute packet sending in thread */
- thread_execute(master, eigrp_update_send_GR_thread, nbr, 0);
+ event_execute(master, eigrp_update_send_GR_thread, nbr, 0);
}
/**
#include <zebra.h>
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "vty.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
struct zclient *zclient = NULL;
/* For registering threads. */
-extern struct thread_master *master;
+extern struct event_loop *master;
struct in_addr router_id_zebra;
/* Router-id update message from zebra. */
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "linklist.h"
eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);
- thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+ event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
eigrp->oi_write_q = list_new();
eigrp->topology_table = route_table_init();
eigrp_if_free(ei, INTERFACE_DOWN_BY_FINAL);
}
- THREAD_OFF(eigrp->t_write);
- THREAD_OFF(eigrp->t_read);
+ EVENT_OFF(eigrp->t_write);
+ EVENT_OFF(eigrp->t_read);
close(eigrp->fd);
list_delete(&eigrp->eiflist);
struct list *eigrp;
/* EIGRP thread master. */
- struct thread_master *master;
+ struct event_loop *master;
/* Zebra interface list. */
struct list *iflist;
/* Extern variables. */
extern struct zclient *zclient;
-extern struct thread_master *master;
+extern struct event_loop *master;
extern struct eigrp_master *eigrp_om;
extern struct zebra_privs_t eigrpd_privs;
Walk through a routing table (or subset thereof) and dump all the non-null
(struct route_node *)->info pointers.
-Argument: A lib/thread.h::(struct route_node *) pointing to the route_node
+Argument: A lib/frrevent.h::(struct route_node *) pointing to the route_node
under which all data should be dumped
end
enum fabricd_sync_state initial_sync_state;
time_t initial_sync_start;
struct isis_circuit *initial_sync_circuit;
- struct thread *initial_sync_timeout;
+ struct event *initial_sync_timeout;
struct isis_spftree *spftree;
struct skiplist *neighbors;
uint8_t tier;
uint8_t tier_config;
uint8_t tier_pending;
- struct thread *tier_calculation_timer;
- struct thread *tier_set_timer;
+ struct event *tier_calculation_timer;
+ struct event *tier_set_timer;
int csnp_delay;
bool always_send_csnp;
void fabricd_finish(struct fabricd *f)
{
- THREAD_OFF(f->initial_sync_timeout);
+ EVENT_OFF(f->initial_sync_timeout);
- THREAD_OFF(f->tier_calculation_timer);
+ EVENT_OFF(f->tier_calculation_timer);
- THREAD_OFF(f->tier_set_timer);
+ EVENT_OFF(f->tier_set_timer);
isis_spftree_del(f->spftree);
neighbor_lists_clear(f);
hash_free(f->neighbors_neighbors);
}
-static void fabricd_initial_sync_timeout(struct thread *thread)
+static void fabricd_initial_sync_timeout(struct event *thread)
{
- struct fabricd *f = THREAD_ARG(thread);
+ struct fabricd *f = EVENT_ARG(thread);
if (IS_DEBUG_ADJ_PACKETS)
zlog_debug(
if (f->initial_sync_timeout)
return;
- thread_add_timer(master, fabricd_initial_sync_timeout, f,
- timeout, &f->initial_sync_timeout);
+ event_add_timer(master, fabricd_initial_sync_timeout, f, timeout,
+ &f->initial_sync_timeout);
f->initial_sync_start = monotime(NULL);
if (IS_DEBUG_ADJ_PACKETS)
f->initial_sync_circuit->interface->name);
f->initial_sync_state = FABRICD_SYNC_COMPLETE;
f->initial_sync_circuit = NULL;
- THREAD_OFF(f->initial_sync_timeout);
+ EVENT_OFF(f->initial_sync_timeout);
}
static void fabricd_bump_tier_calculation_timer(struct fabricd *f);
return tier;
}
-static void fabricd_tier_set_timer(struct thread *thread)
+static void fabricd_tier_set_timer(struct event *thread)
{
- struct fabricd *f = THREAD_ARG(thread);
+ struct fabricd *f = EVENT_ARG(thread);
fabricd_set_tier(f, f->tier_pending);
}
-static void fabricd_tier_calculation_cb(struct thread *thread)
+static void fabricd_tier_calculation_cb(struct event *thread)
{
- struct fabricd *f = THREAD_ARG(thread);
+ struct fabricd *f = EVENT_ARG(thread);
uint8_t tier = ISIS_TIER_UNDEFINED;
tier = fabricd_calculate_fabric_tier(f->area);
zlog_info("OpenFabric: Got tier %hhu from algorithm. Arming timer.",
tier);
f->tier_pending = tier;
- thread_add_timer(master, fabricd_tier_set_timer, f,
- f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
- &f->tier_set_timer);
-
+ event_add_timer(master, fabricd_tier_set_timer, f,
+ f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+ &f->tier_set_timer);
}
static void fabricd_bump_tier_calculation_timer(struct fabricd *f)
{
/* Cancel timer if we already know our tier */
if (f->tier != ISIS_TIER_UNDEFINED || f->tier_set_timer) {
- THREAD_OFF(f->tier_calculation_timer);
+ EVENT_OFF(f->tier_calculation_timer);
return;
}
/* If we need to calculate the tier, wait some
* time for the topology to settle before running
* the calculation */
- THREAD_OFF(f->tier_calculation_timer);
+ EVENT_OFF(f->tier_calculation_timer);
- thread_add_timer(master, fabricd_tier_calculation_cb, f,
- 2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
- &f->tier_calculation_timer);
+ event_add_timer(master, fabricd_tier_calculation_cb, f,
+ 2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+ &f->tier_calculation_timer);
}
static void fabricd_set_tier(struct fabricd *f, uint8_t tier)
if (!circuit->t_send_csnp[1])
continue;
- THREAD_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
- thread_add_timer_msec(master, send_l2_csnp, circuit,
- isis_jitter(f->csnp_delay, CSNP_JITTER),
- &circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
+ EVENT_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
+ event_add_timer_msec(master, send_l2_csnp, circuit,
+ isis_jitter(f->csnp_delay, CSNP_JITTER),
+ &circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
}
}
#include "hash.h"
#include "vty.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "if.h"
#include "stream.h"
#include "bfd.h"
/* Remove self from snmp list without walking the list*/
list_delete_node(adj->circuit->snmp_adj_list, adj->snmp_list_node);
- THREAD_OFF(adj->t_expire);
+ EVENT_OFF(adj->t_expire);
if (adj->adj_state != ISIS_ADJ_DOWN)
adj->adj_state = ISIS_ADJ_DOWN;
adj->flaps++;
if (level == IS_LEVEL_1) {
- thread_add_timer(master, send_l1_csnp,
- circuit, 0,
- &circuit->t_send_csnp[0]);
+ event_add_timer(
+ master, send_l1_csnp, circuit,
+ 0, &circuit->t_send_csnp[0]);
} else {
- thread_add_timer(master, send_l2_csnp,
- circuit, 0,
- &circuit->t_send_csnp[1]);
+ event_add_timer(
+ master, send_l2_csnp, circuit,
+ 0, &circuit->t_send_csnp[1]);
}
} else if (old_state == ISIS_ADJ_UP) {
circuit->upadjcount[level - 1]--;
assert(!"Reached end of function where we are not expecting to");
}
-void isis_adj_expire(struct thread *thread)
+void isis_adj_expire(struct event *thread)
{
struct isis_adjacency *adj;
/*
* Get the adjacency
*/
- adj = THREAD_ARG(thread);
+ adj = EVENT_ARG(thread);
assert(adj);
adj->t_expire = NULL;
enum isis_threeway_state threeway_state;
uint32_t ext_circuit_id;
int flaps; /* number of adjacency flaps */
- struct thread *t_expire; /* expire after hold_time */
+ struct event *t_expire; /* expire after hold_time */
struct isis_circuit *circuit; /* back pointer */
uint16_t *mt_set; /* Topologies this adjacency is valid for */
unsigned int mt_count; /* Number of entries in mt_set */
enum isis_adj_state state, const char *reason);
void isis_adj_print(struct isis_adjacency *adj);
const char *isis_adj_yang_state(enum isis_adj_state state);
-void isis_adj_expire(struct thread *thread);
+void isis_adj_expire(struct event *thread);
void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty,
char detail);
void isis_adj_print_json(struct isis_adjacency *adj, struct json_object *json,
void isis_adj_build_neigh_list(struct list *adjdb, struct list *list);
void isis_adj_build_up_list(struct list *adjdb, struct list *list);
int isis_adj_usage2levels(enum isis_adj_usage usage);
-void isis_bfd_startup_timer(struct thread *thread);
+void isis_bfd_startup_timer(struct event *thread);
const char *isis_adj_name(const struct isis_adjacency *adj);
#endif /* ISIS_ADJACENCY_H */
return 0;
}
-void isis_bfd_init(struct thread_master *tm)
+void isis_bfd_init(struct event_loop *tm)
{
bfd_protocol_integration_init(zclient, tm);
#define ISIS_BFD_H
struct isis_circuit;
-struct thread_master;
+struct event_loop;
void isis_bfd_circuit_cmd(struct isis_circuit *circuit);
-void isis_bfd_init(struct thread_master *tm);
+void isis_bfd_init(struct event_loop *tm);
#endif
#include "if.h"
#include "linklist.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "hash.h"
#include "prefix.h"
void isis_circuit_prepare(struct isis_circuit *circuit)
{
#if ISIS_METHOD != ISIS_METHOD_DLPI
- thread_add_read(master, isis_receive, circuit, circuit->fd,
- &circuit->t_read);
+ event_add_read(master, isis_receive, circuit, circuit->fd,
+ &circuit->t_read);
#else
- thread_add_timer_msec(master, isis_receive, circuit,
- listcount(circuit->area->circuit_list) * 100,
- &circuit->t_read);
+ event_add_timer_msec(master, isis_receive, circuit,
+ listcount(circuit->area->circuit_list) * 100,
+ &circuit->t_read);
#endif
}
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr,
+ &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
}
/* 8.4.1 b) FIXME: solicit ES - 8.4.6 */
/* initializing PSNP timers */
if (circuit->is_type & IS_LEVEL_1)
- thread_add_timer(
+ event_add_timer(
master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
&circuit->t_send_psnp[0]);
if (circuit->is_type & IS_LEVEL_2)
- thread_add_timer(
+ event_add_timer(
master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
&circuit->t_send_psnp[1]);
memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);
memset(circuit->u.bc.snpa, 0, ETH_ALEN);
- THREAD_OFF(circuit->u.bc.t_send_lan_hello[0]);
- THREAD_OFF(circuit->u.bc.t_send_lan_hello[1]);
- THREAD_OFF(circuit->u.bc.t_run_dr[0]);
- THREAD_OFF(circuit->u.bc.t_run_dr[1]);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[0]);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[1]);
+ EVENT_OFF(circuit->u.bc.t_send_lan_hello[0]);
+ EVENT_OFF(circuit->u.bc.t_send_lan_hello[1]);
+ EVENT_OFF(circuit->u.bc.t_run_dr[0]);
+ EVENT_OFF(circuit->u.bc.t_run_dr[1]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[0]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[1]);
circuit->lsp_regenerate_pending[0] = 0;
circuit->lsp_regenerate_pending[1] = 0;
} else if (circuit->circ_type == CIRCUIT_T_P2P) {
isis_delete_adj(circuit->u.p2p.neighbor);
circuit->u.p2p.neighbor = NULL;
- THREAD_OFF(circuit->u.p2p.t_send_p2p_hello);
+ EVENT_OFF(circuit->u.p2p.t_send_p2p_hello);
}
/*
circuit->snmp_adj_idx_gen = 0;
/* Cancel all active threads */
- THREAD_OFF(circuit->t_send_csnp[0]);
- THREAD_OFF(circuit->t_send_csnp[1]);
- THREAD_OFF(circuit->t_send_psnp[0]);
- THREAD_OFF(circuit->t_send_psnp[1]);
- THREAD_OFF(circuit->t_read);
+ EVENT_OFF(circuit->t_send_csnp[0]);
+ EVENT_OFF(circuit->t_send_csnp[1]);
+ EVENT_OFF(circuit->t_send_psnp[0]);
+ EVENT_OFF(circuit->t_send_psnp[1]);
+ EVENT_OFF(circuit->t_read);
if (circuit->tx_queue) {
isis_tx_queue_free(circuit->tx_queue);
circuit->snd_stream = NULL;
}
- thread_cancel_event(master, circuit);
+ event_cancel_event(master, circuit);
return;
}
struct isis_bcast_info {
uint8_t snpa[ETH_ALEN]; /* SNPA of this circuit */
char run_dr_elect[ISIS_LEVELS]; /* Should we run dr election ? */
- struct thread *t_run_dr[ISIS_LEVELS]; /* DR election thread */
- struct thread *t_send_lan_hello[ISIS_LEVELS]; /* send LAN IIHs in this
- thread */
+ struct event *t_run_dr[ISIS_LEVELS]; /* DR election thread */
+ struct event *t_send_lan_hello[ISIS_LEVELS]; /* send LAN IIHs in this
+ thread */
struct list *adjdb[ISIS_LEVELS]; /* adjacency dbs */
struct list *lan_neighs[ISIS_LEVELS]; /* list of lx neigh snpa */
char is_dr[ISIS_LEVELS]; /* Are we level x DR ? */
uint8_t l1_desig_is[ISIS_SYS_ID_LEN + 1]; /* level-1 DR */
uint8_t l2_desig_is[ISIS_SYS_ID_LEN + 1]; /* level-2 DR */
- struct thread *t_refresh_pseudo_lsp[ISIS_LEVELS]; /* refresh pseudo-node
+ struct event *t_refresh_pseudo_lsp[ISIS_LEVELS]; /* refresh pseudo-node
LSPs */
};
struct isis_p2p_info {
struct isis_adjacency *neighbor;
- struct thread *t_send_p2p_hello; /* send P2P IIHs in this thread */
+ struct event *t_send_p2p_hello; /* send P2P IIHs in this thread */
};
struct isis_circuit_arg {
/*
* Threads
*/
- struct thread *t_read;
- struct thread *t_send_csnp[ISIS_LEVELS];
- struct thread *t_send_psnp[ISIS_LEVELS];
+ struct event *t_read;
+ struct event *t_send_csnp[ISIS_LEVELS];
+ struct event *t_send_psnp[ISIS_LEVELS];
struct isis_tx_queue *tx_queue;
struct isis_circuit_arg
level_arg[ISIS_LEVELS]; /* used as argument for threads */
#include "if.h"
#include "linklist.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "prefix.h"
#include "stream.h"
#include "log.h"
#include "hash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "stream.h"
return NULL; /* not reached */
}
-void isis_run_dr(struct thread *thread)
+void isis_run_dr(struct event *thread)
{
- struct isis_circuit_arg *arg = THREAD_ARG(thread);
+ struct isis_circuit_arg *arg = EVENT_ARG(thread);
assert(arg);
circuit->u.bc.is_dr[level - 1] = 0;
circuit->u.bc.run_dr_elect[level - 1] = 0;
- THREAD_OFF(circuit->u.bc.t_run_dr[level - 1]);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ EVENT_OFF(circuit->u.bc.t_run_dr[level - 1]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
memcpy(id, circuit->isis->sysid, ISIS_SYS_ID_LEN);
if (level == 1) {
memset(circuit->u.bc.l1_desig_is, 0, ISIS_SYS_ID_LEN + 1);
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[level - 1],
- PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[level - 1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
} else {
memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[level - 1],
- PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[level - 1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
- THREAD_OFF(circuit->t_send_csnp[level - 1]);
+ EVENT_OFF(circuit->t_send_csnp[level - 1]);
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
- thread_add_event(master, isis_event_dis_status_change, circuit, 0,
- NULL);
+ event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);
return ISIS_OK;
}
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 1);
- thread_add_timer(master, send_l1_csnp, circuit,
- isis_jitter(circuit->csnp_interval[level - 1],
- CSNP_JITTER),
- &circuit->t_send_csnp[0]);
+ event_add_timer(master, send_l1_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[level - 1],
+ CSNP_JITTER),
+ &circuit->t_send_csnp[0]);
} else {
memcpy(old_dr, circuit->u.bc.l2_desig_is, ISIS_SYS_ID_LEN + 1);
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 2);
- thread_add_timer(master, send_l2_csnp, circuit,
- isis_jitter(circuit->csnp_interval[level - 1],
- CSNP_JITTER),
- &circuit->t_send_csnp[1]);
+ event_add_timer(master, send_l2_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[level - 1],
+ CSNP_JITTER),
+ &circuit->t_send_csnp[1]);
}
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
- thread_add_event(master, isis_event_dis_status_change, circuit, 0,
- NULL);
+ event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);
return ISIS_OK;
}
#ifndef _ZEBRA_ISIS_DR_H
#define _ZEBRA_ISIS_DR_H
-void isis_run_dr(struct thread *thread);
+void isis_run_dr(struct event *thread);
int isis_dr_elect(struct isis_circuit *circuit, int level);
int isis_dr_resign(struct isis_circuit *circuit, int level);
int isis_dr_commence(struct isis_circuit *circuit, int level);
#include "stream.h"
#include "command.h"
#include "if.h"
-#include "thread.h"
+#include "frrevent.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
DEFINE_MTYPE_STATIC(ISISD, ISIS_DYNHN, "ISIS dyn hostname");
-static void dyn_cache_cleanup(struct thread *);
+static void dyn_cache_cleanup(struct event *);
void dyn_cache_init(struct isis *isis)
{
isis->dyn_cache = list_new();
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
- thread_add_timer(master, dyn_cache_cleanup, isis, 120,
- &isis->t_dync_clean);
+ event_add_timer(master, dyn_cache_cleanup, isis, 120,
+ &isis->t_dync_clean);
}
void dyn_cache_finish(struct isis *isis)
struct listnode *node, *nnode;
struct isis_dynhn *dyn;
- THREAD_OFF(isis->t_dync_clean);
+ EVENT_OFF(isis->t_dync_clean);
for (ALL_LIST_ELEMENTS(isis->dyn_cache, node, nnode, dyn)) {
list_delete_node(isis->dyn_cache, node);
list_delete(&isis->dyn_cache);
}
-static void dyn_cache_cleanup(struct thread *thread)
+static void dyn_cache_cleanup(struct event *thread)
{
struct listnode *node, *nnode;
struct isis_dynhn *dyn;
time_t now = time(NULL);
struct isis *isis = NULL;
- isis = THREAD_ARG(thread);
+ isis = EVENT_ARG(thread);
isis->t_dync_clean = NULL;
XFREE(MTYPE_ISIS_DYNHN, dyn);
}
- thread_add_timer(master, dyn_cache_cleanup, isis, 120,
+ event_add_timer(master, dyn_cache_cleanup, isis, 120,
&isis->t_dync_clean);
}
#include "if.h"
#include "linklist.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "prefix.h"
#include "stream.h"
if (!circuit->is_passive) {
if (level == 1) {
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[0],
- PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[0],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
} else {
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[1],
- PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
}
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr,
+ &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();
circuit->area->area_tag, circuit->circuit_id,
circuit->interface->name, level);
- THREAD_OFF(circuit->t_send_csnp[idx]);
- THREAD_OFF(circuit->t_send_psnp[idx]);
+ EVENT_OFF(circuit->t_send_csnp[idx]);
+ EVENT_OFF(circuit->t_send_psnp[idx]);
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
- THREAD_OFF(circuit->u.bc.t_send_lan_hello[idx]);
- THREAD_OFF(circuit->u.bc.t_run_dr[idx]);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[idx]);
+ EVENT_OFF(circuit->u.bc.t_send_lan_hello[idx]);
+ EVENT_OFF(circuit->u.bc.t_run_dr[idx]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[idx]);
circuit->lsp_regenerate_pending[idx] = 0;
circuit->u.bc.run_dr_elect[idx] = 0;
circuit->u.bc.is_dr[idx] = 0;
/* events supporting code */
-void isis_event_dis_status_change(struct thread *thread)
+void isis_event_dis_status_change(struct event *thread)
{
struct isis_circuit *circuit;
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
/* invalid arguments */
if (!circuit || !circuit->area)
/*
* Events related to adjacencies
*/
-void isis_event_dis_status_change(struct thread *thread);
+void isis_event_dis_status_change(struct event *thread);
/*
* Error events
#include "monotime.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "vty.h"
if (ldp_sync_info->state == LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP)
ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_UP;
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
isis_ldp_sync_set_if_metric(circuit, true);
}
if (ldp_sync_info &&
ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED &&
ldp_sync_info->state != LDP_IGP_SYNC_STATE_NOT_REQUIRED) {
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP;
isis_ldp_sync_set_if_metric(circuit, true);
}
/*
* LDP-SYNC holddown timer routines
*/
-static void isis_ldp_sync_holddown_timer(struct thread *thread)
+static void isis_ldp_sync_holddown_timer(struct event *thread)
{
struct isis_circuit *circuit;
struct ldp_sync_info *ldp_sync_info;
* didn't receive msg from LDP indicating sync-complete
* restore interface cost to original value
*/
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
if (circuit->ldp_sync_info == NULL)
return;
ils_debug("%s: start holddown timer for %s time %d", __func__,
circuit->interface->name, ldp_sync_info->holddown);
- thread_add_timer(master, isis_ldp_sync_holddown_timer,
- circuit, ldp_sync_info->holddown,
- &ldp_sync_info->t_holddown);
+ event_add_timer(master, isis_ldp_sync_holddown_timer, circuit,
+ ldp_sync_info->holddown, &ldp_sync_info->t_holddown);
}
/*
if (!CHECK_FLAG(area->ldp_sync_cmd.flags, LDP_SYNC_FLAG_ENABLE))
return;
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ldp_sync_info->state = LDP_IGP_SYNC_STATE_NOT_REQUIRED;
isis_ldp_sync_set_if_metric(circuit, true);
}
break;
case LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP:
if (ldp_sync_info->t_holddown != NULL) {
- struct timeval remain = thread_timer_remain(
- ldp_sync_info->t_holddown);
+ struct timeval remain =
+ event_timer_remain(ldp_sync_info->t_holddown);
vty_out(vty,
" Holddown timer is running %lld.%03lld remaining\n",
(long long)remain.tv_sec,
return rlfa_tree_find(&spftree->lfa.remote.rlfas, &s);
}
-static void isis_area_verify_routes_cb(struct thread *thread)
+static void isis_area_verify_routes_cb(struct event *thread)
{
- struct isis_area *area = THREAD_ARG(thread);
+ struct isis_area *area = EVENT_ARG(thread);
if (IS_DEBUG_LFA)
zlog_debug("ISIS-LFA: updating RLFAs in the RIB");
spftree->route_table_backup);
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1;
- THREAD_OFF(area->t_rlfa_rib_update);
- thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
- &area->t_rlfa_rib_update);
+ EVENT_OFF(area->t_rlfa_rib_update);
+ event_add_timer(master, isis_area_verify_routes_cb, area, 2,
+ &area->t_rlfa_rib_update);
return 0;
}
isis_route_delete(area, rn, spftree->route_table_backup);
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1;
- THREAD_OFF(area->t_rlfa_rib_update);
- thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
- &area->t_rlfa_rib_update);
+ EVENT_OFF(area->t_rlfa_rib_update);
+ event_add_timer(master, isis_area_verify_routes_cb, area, 2,
+ &area->t_rlfa_rib_update);
}
void isis_rlfa_list_init(struct isis_spftree *spftree)
#include <zebra.h>
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "stream.h"
#include "memory.h"
DEFINE_MTYPE_STATIC(ISISD, ISIS_LSP, "ISIS LSP");
-static void lsp_refresh(struct thread *thread);
-static void lsp_l1_refresh_pseudo(struct thread *thread);
-static void lsp_l2_refresh_pseudo(struct thread *thread);
+static void lsp_refresh(struct event *thread);
+static void lsp_l1_refresh_pseudo(struct event *thread);
+static void lsp_l2_refresh_pseudo(struct event *thread);
static void lsp_destroy(struct isis_lsp *lsp);
/*
* Unset the overload bit after the timer expires
*/
-void set_overload_on_start_timer(struct thread *thread)
+void set_overload_on_start_timer(struct event *thread)
{
- struct isis_area *area = THREAD_ARG(thread);
+ struct isis_area *area = EVENT_ARG(thread);
assert(area);
area->t_overload_on_startup_timer = NULL;
return ISIS_ERROR;
/* Check if config is still being processed */
- if (thread_is_scheduled(t_isis_cfg))
+ if (event_is_scheduled(t_isis_cfg))
return ISIS_OK;
memset(&lspid, 0, ISIS_SYS_ID_LEN + 2);
overload_time = isis_restart_read_overload_time(area);
if (overload_time > 0) {
isis_area_overload_bit_set(area, true);
- thread_add_timer(master, set_overload_on_start_timer,
- area, overload_time,
- &area->t_overload_on_startup_timer);
+ event_add_timer(master, set_overload_on_start_timer,
+ area, overload_time,
+ &area->t_overload_on_startup_timer);
}
device_startup = false;
}
refresh_time = lsp_refresh_time(newlsp, rem_lifetime);
- THREAD_OFF(area->t_lsp_refresh[level - 1]);
+ EVENT_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
- thread_add_timer(master, lsp_refresh,
- &area->lsp_refresh_arg[level - 1], refresh_time,
- &area->t_lsp_refresh[level - 1]);
+ event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
+ refresh_time, &area->t_lsp_refresh[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug("ISIS-Upd (%s): Building L%d LSP %s, len %hu, seq 0x%08x, cksum 0x%04hx, lifetime %hus refresh %hus",
lsp_seqno_update(lsp);
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
- thread_add_timer(master, lsp_refresh,
- &area->lsp_refresh_arg[level - 1], refresh_time,
- &area->t_lsp_refresh[level - 1]);
+ event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
+ refresh_time, &area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
if (IS_DEBUG_UPDATE_PACKETS) {
/*
* Something has changed or periodic refresh -> regenerate LSP
*/
-static void lsp_refresh(struct thread *thread)
+static void lsp_refresh(struct event *thread)
{
- struct lsp_refresh_arg *arg = THREAD_ARG(thread);
+ struct lsp_refresh_arg *arg = EVENT_ARG(thread);
assert(arg);
* Note: in case of a BFD 'down' message the refresh is
* scheduled once again just to be sure
*/
- struct timeval remain = thread_timer_remain(
+ struct timeval remain = event_timer_remain(
area->t_lsp_refresh[lvl - 1]);
sched_debug(
"ISIS (%s): Regeneration is already pending, nothing todo. (Due in %lld.%03lld seconds)",
"ISIS (%s): Will schedule regen timer. Last run was: %lld, Now is: %lld",
area->area_tag, (long long)lsp->last_generated,
(long long)now);
- THREAD_OFF(area->t_lsp_refresh[lvl - 1]);
+ EVENT_OFF(area->t_lsp_refresh[lvl - 1]);
diff = now - lsp->last_generated;
if (diff < area->lsp_gen_interval[lvl - 1]
&& !(area->bfd_signalled_down)) {
}
area->lsp_regenerate_pending[lvl - 1] = 1;
- thread_add_timer_msec(master, lsp_refresh,
- &area->lsp_refresh_arg[lvl - 1],
- timeout,
- &area->t_lsp_refresh[lvl - 1]);
+ event_add_timer_msec(master, lsp_refresh,
+ &area->lsp_refresh_arg[lvl - 1], timeout,
+ &area->t_lsp_refresh[lvl - 1]);
}
if (all_pseudo) {
lsp_flood(lsp, NULL);
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
if (level == IS_LEVEL_1)
- thread_add_timer(
- master, lsp_l1_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
- thread_add_timer(
- master, lsp_l2_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
if (level == IS_LEVEL_1)
- thread_add_timer(
- master, lsp_l1_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
- thread_add_timer(
- master, lsp_l2_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
/*
* Something has changed or periodic refresh -> regenerate pseudo LSP
*/
-static void lsp_l1_refresh_pseudo(struct thread *thread)
+static void lsp_l1_refresh_pseudo(struct event *thread)
{
struct isis_circuit *circuit;
uint8_t id[ISIS_SYS_ID_LEN + 2];
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
circuit->u.bc.t_refresh_pseudo_lsp[0] = NULL;
circuit->lsp_regenerate_pending[0] = 0;
lsp_regenerate_pseudo(circuit, IS_LEVEL_1);
}
-static void lsp_l2_refresh_pseudo(struct thread *thread)
+static void lsp_l2_refresh_pseudo(struct event *thread)
{
struct isis_circuit *circuit;
uint8_t id[ISIS_SYS_ID_LEN + 2];
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
circuit->u.bc.t_refresh_pseudo_lsp[1] = NULL;
circuit->lsp_regenerate_pending[1] = 0;
}
if (circuit->lsp_regenerate_pending[lvl - 1]) {
- struct timeval remain = thread_timer_remain(
+ struct timeval remain = event_timer_remain(
circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
sched_debug(
"ISIS (%s): Regenerate is already pending, nothing todo. (Due in %lld.%03lld seconds)",
"ISIS (%s): Will schedule PSN regen timer. Last run was: %lld, Now is: %lld",
area->area_tag, (long long)lsp->last_generated,
(long long)now);
- THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
+ EVENT_OFF(circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
diff = now - lsp->last_generated;
if (diff < circuit->area->lsp_gen_interval[lvl - 1]) {
timeout =
circuit->lsp_regenerate_pending[lvl - 1] = 1;
if (lvl == IS_LEVEL_1) {
- thread_add_timer_msec(
+ event_add_timer_msec(
master, lsp_l1_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
} else if (lvl == IS_LEVEL_2) {
- thread_add_timer_msec(
+ event_add_timer_msec(
master, lsp_l2_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
}
* Walk through LSPs for an area
* - set remaining lifetime
*/
-void lsp_tick(struct thread *thread)
+void lsp_tick(struct event *thread)
{
struct isis_area *area;
struct isis_lsp *lsp;
uint16_t rem_lifetime;
bool fabricd_sync_incomplete = false;
- area = THREAD_ARG(thread);
+ area = EVENT_ARG(thread);
assert(area);
area->t_tick = NULL;
- thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
+ event_add_timer(master, lsp_tick, area, 1, &area->t_tick);
struct isis_circuit *fabricd_init_c = fabricd_initial_sync_circuit(area);
void lsp_db_init(struct lspdb_head *head);
void lsp_db_fini(struct lspdb_head *head);
-void lsp_tick(struct thread *thread);
-void set_overload_on_start_timer(struct thread *thread);
+void lsp_tick(struct event *thread);
+void set_overload_on_start_timer(struct event *thread);
int lsp_generate(struct isis_area *area, int level);
#define lsp_regenerate_schedule(area, level, all_pseudo) \
#include <zebra.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include <lib/version.h>
#include "command.h"
{0}};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/*
* Prototypes.
/* clang-format on */
-static void isis_config_finish(struct thread *t)
+static void isis_config_finish(struct event *t)
{
struct listnode *node, *inode;
struct isis *isis;
{
/* Max wait time for config to load before generating lsp */
#define ISIS_PRE_CONFIG_MAX_WAIT_SECONDS 600
- THREAD_OFF(t_isis_cfg);
- thread_add_timer(im->master, isis_config_finish, NULL,
- ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
+ EVENT_OFF(t_isis_cfg);
+ event_add_timer(im->master, isis_config_finish, NULL,
+ ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
}
static void isis_config_end(void)
/* If ISIS config processing thread isn't running, then
* we can return and rely it's properly handled.
*/
- if (!thread_is_scheduled(t_isis_cfg))
+ if (!event_is_scheduled(t_isis_cfg))
return;
- THREAD_OFF(t_isis_cfg);
+ EVENT_OFF(t_isis_cfg);
isis_config_finish(t_isis_cfg);
}
#include <zebra.h>
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "log.h"
#include "stream.h"
adj);
/* lets take care of the expiry */
- THREAD_OFF(adj->t_expire);
- thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
- &adj->t_expire);
+ EVENT_OFF(adj->t_expire);
+ event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
+ &adj->t_expire);
/* While fabricds initial sync is in progress, ignore hellos from other
* interfaces than the one we are performing the initial sync on. */
: iih->circuit->u.bc.l2_desig_is;
if (memcmp(dis, iih->dis, ISIS_SYS_ID_LEN + 1)) {
- thread_add_event(master, isis_event_dis_status_change,
- iih->circuit, 0, NULL);
+ event_add_event(master, isis_event_dis_status_change,
+ iih->circuit, 0, NULL);
memcpy(dis, iih->dis, ISIS_SYS_ID_LEN + 1);
}
}
adj);
/* lets take care of the expiry */
- THREAD_OFF(adj->t_expire);
- thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
- &adj->t_expire);
+ EVENT_OFF(adj->t_expire);
+ event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
+ &adj->t_expire);
/*
* If the snpa for this circuit is found from LAN Neighbours TLV
if (idrp == ISO9542_ESIS) {
flog_err(EC_LIB_DEVELOPMENT,
"No support for ES-IS packet IDRP=%hhx", idrp);
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
if (idrp != ISO10589_ISIS) {
flog_err(EC_ISIS_PACKET, "Not an IS-IS packet IDRP=%hhx",
idrp);
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
isis_notif_version_skew(circuit, version1, raw_pdu,
sizeof(raw_pdu));
#endif /* ifndef FABRICD */
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_WARNING;
}
isis_notif_id_len_mismatch(circuit, id_len, raw_pdu,
sizeof(raw_pdu));
#endif /* ifndef FABRICD */
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
uint8_t expected_length;
if (pdu_size(pdu_type, &expected_length)) {
zlog_warn("Unsupported ISIS PDU %hhu", pdu_type);
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_WARNING;
}
flog_err(EC_ISIS_PACKET,
"Expected fixed header length = %hhu but got %hhu",
expected_length, length);
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
flog_err(
EC_ISIS_PACKET,
"PDU is too short to contain fixed header of given PDU type.");
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
isis_notif_version_skew(circuit, version2, raw_pdu,
sizeof(raw_pdu));
#endif /* ifndef FABRICD */
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_WARNING;
}
if (circuit->is_passive) {
zlog_warn("Received ISIS PDU on passive circuit %s",
circuit->interface->name);
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_WARNING;
}
isis_notif_max_area_addr_mismatch(circuit, max_area_addrs,
raw_pdu, sizeof(raw_pdu));
#endif /* ifndef FABRICD */
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
case L1_LAN_HELLO:
case L2_LAN_HELLO:
case P2P_HELLO:
- if (fabricd && pdu_type != P2P_HELLO)
+ if (fabricd && pdu_type != P2P_HELLO) {
+ pdu_counter_count(circuit->area->pdu_drop_counters,
+ pdu_type);
return ISIS_ERROR;
+ }
+
retval = process_hello(pdu_type, circuit, ssnpa);
break;
case L1_LINK_STATE:
case L2_LINK_STATE:
case FS_LINK_STATE:
- if (fabricd
- && pdu_type != L2_LINK_STATE
- && pdu_type != FS_LINK_STATE)
+ if (fabricd && pdu_type != L2_LINK_STATE &&
+ pdu_type != FS_LINK_STATE) {
+ pdu_counter_count(circuit->area->pdu_drop_counters,
+ pdu_type);
return ISIS_ERROR;
+ }
+
retval = process_lsp(pdu_type, circuit, ssnpa, max_area_addrs);
break;
case L1_COMPLETE_SEQ_NUM:
retval = process_snp(pdu_type, circuit, ssnpa);
break;
default:
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
return ISIS_ERROR;
}
+ if (retval != ISIS_OK)
+ pdu_counter_count(circuit->area->pdu_drop_counters, pdu_type);
+
return retval;
}
-void isis_receive(struct thread *thread)
+void isis_receive(struct event *thread)
{
struct isis_circuit *circuit;
uint8_t ssnpa[ETH_ALEN];
/*
* Get the circuit
*/
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
assert(circuit);
circuit->t_read = NULL;
return retval;
}
-static void send_hello_cb(struct thread *thread)
+static void send_hello_cb(struct event *thread)
{
- struct isis_circuit_arg *arg = THREAD_ARG(thread);
+ struct isis_circuit_arg *arg = EVENT_ARG(thread);
assert(arg);
struct isis_circuit *circuit = arg->circuit;
}
static void _send_hello_sched(struct isis_circuit *circuit,
- struct thread **threadp,
- int level, long delay)
+ struct event **threadp, int level, long delay)
{
if (*threadp) {
- if (thread_timer_remain_msec(*threadp) < (unsigned long)delay)
+ if (event_timer_remain_msec(*threadp) < (unsigned long)delay)
return;
- THREAD_OFF(*threadp);
+ EVENT_OFF(*threadp);
}
- thread_add_timer_msec(master, send_hello_cb,
- &circuit->level_arg[level - 1],
- isis_jitter(delay, IIH_JITTER),
- threadp);
+ event_add_timer_msec(master, send_hello_cb,
+ &circuit->level_arg[level - 1],
+ isis_jitter(delay, IIH_JITTER), threadp);
}
void send_hello_sched(struct isis_circuit *circuit, int level, long delay)
return ISIS_OK;
}
-void send_l1_csnp(struct thread *thread)
+void send_l1_csnp(struct event *thread)
{
struct isis_circuit *circuit;
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
assert(circuit);
circuit->t_send_csnp[0] = NULL;
send_csnp(circuit, 1);
}
/* set next timer thread */
- thread_add_timer(master, send_l1_csnp, circuit,
- isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
- &circuit->t_send_csnp[0]);
+ event_add_timer(master, send_l1_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
+ &circuit->t_send_csnp[0]);
}
-void send_l2_csnp(struct thread *thread)
+void send_l2_csnp(struct event *thread)
{
struct isis_circuit *circuit;
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
assert(circuit);
circuit->t_send_csnp[1] = NULL;
send_csnp(circuit, 2);
}
/* set next timer thread */
- thread_add_timer(master, send_l2_csnp, circuit,
- isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
- &circuit->t_send_csnp[1]);
+ event_add_timer(master, send_l2_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
+ &circuit->t_send_csnp[1]);
}
/*
return ISIS_OK;
}
-void send_l1_psnp(struct thread *thread)
+void send_l1_psnp(struct event *thread)
{
struct isis_circuit *circuit;
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
assert(circuit);
circuit->t_send_psnp[0] = NULL;
send_psnp(1, circuit);
/* set next timer thread */
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
}
/*
* 7.3.15.4 action on expiration of partial SNP interval
* level 2
*/
-void send_l2_psnp(struct thread *thread)
+void send_l2_psnp(struct event *thread)
{
struct isis_circuit *circuit;
- circuit = THREAD_ARG(thread);
+ circuit = EVENT_ARG(thread);
assert(circuit);
circuit->t_send_psnp[1] = NULL;
send_psnp(2, circuit);
/* set next timer thread */
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
/*
/*
* Function for receiving IS-IS PDUs
*/
-void isis_receive(struct thread *thread);
+void isis_receive(struct event *thread);
/*
* calling arguments for snp_process ()
*/
void send_hello_sched(struct isis_circuit *circuit, int level, long delay);
int send_csnp(struct isis_circuit *circuit, int level);
-void send_l1_csnp(struct thread *thread);
-void send_l2_csnp(struct thread *thread);
-void send_l1_psnp(struct thread *thread);
-void send_l2_psnp(struct thread *thread);
+void send_l1_csnp(struct event *thread);
+void send_l2_csnp(struct event *thread);
+void send_l1_psnp(struct event *thread);
+void send_l2_psnp(struct event *thread);
void send_lsp(struct isis_circuit *circuit,
struct isis_lsp *lsp, enum isis_tx_type tx_type);
void fill_fixed_hdr(uint8_t pdu_type, struct stream *stream);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "log.h"
#include "plist.h"
#include "routemap.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "isis_constants.h"
/* Register ISIS-MIB. */
-static int isis_snmp_init(struct thread_master *tm)
+static int isis_snmp_init(struct event_loop *tm)
{
struct isis_func_to_prefix *h2f = isis_func_to_prefix_arr;
struct variable *v;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "log.h"
{
list_delete(&vertex->Adj_N);
list_delete(&vertex->parents);
- if (vertex->firsthops) {
- hash_clean(vertex->firsthops, NULL);
- hash_free(vertex->firsthops);
- vertex->firsthops = NULL;
- }
+ hash_clean_and_free(&vertex->firsthops, NULL);
memset(vertex, 0, sizeof(struct isis_vertex));
XFREE(MTYPE_ISIS_VERTEX, vertex);
void isis_spftree_del(struct isis_spftree *spftree)
{
- hash_clean(spftree->prefix_sids, NULL);
- hash_free(spftree->prefix_sids);
+ hash_clean_and_free(&spftree->prefix_sids, NULL);
isis_zebra_rlfa_unregister_all(spftree);
isis_rlfa_list_clear(spftree);
list_delete(&spftree->lfa.remote.pc_spftrees);
family, nexthop_ip, ifindex);
}
-static void isis_run_spf_cb(struct thread *thread)
+static void isis_run_spf_cb(struct event *thread)
{
- struct isis_spf_run *run = THREAD_ARG(thread);
+ struct isis_spf_run *run = EVENT_ARG(thread);
struct isis_area *area = run->area;
int level = run->level;
int have_run = 0;
area->area_tag, level, diff, func, file, line);
}
- THREAD_OFF(area->t_rlfa_rib_update);
+ EVENT_OFF(area->t_rlfa_rib_update);
if (area->spf_delay_ietf[level - 1]) {
/* Need to call schedule function also if spf delay is running
* to
if (area->spf_timer[level - 1])
return ISIS_OK;
- thread_add_timer_msec(master, isis_run_spf_cb,
- isis_run_spf_arg(area, level), delay,
- &area->spf_timer[level - 1]);
+ event_add_timer_msec(master, isis_run_spf_cb,
+ isis_run_spf_arg(area, level), delay,
+ &area->spf_timer[level - 1]);
return ISIS_OK;
}
timer = area->min_spf_interval[level - 1] - diff;
}
- thread_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
- timer, &area->spf_timer[level - 1]);
+ event_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
+ timer, &area->spf_timer[level - 1]);
if (IS_DEBUG_SPF_EVENTS)
zlog_debug("ISIS-SPF (%s) L%d SPF scheduled %ld sec from now",
*
* @return 1 on success
*/
-static void sr_start_label_manager(struct thread *start)
+static void sr_start_label_manager(struct event *start)
{
struct isis_area *area;
- area = THREAD_ARG(start);
+ area = EVENT_ARG(start);
/* re-attempt to start SR & Label Manager connection */
isis_sr_start(area);
if (!isis_zebra_label_manager_ready())
if (isis_zebra_label_manager_connect() < 0) {
/* Re-attempt to connect to Label Manager in 1 sec. */
- thread_add_timer(master, sr_start_label_manager, area,
- 1, &srdb->t_start_lm);
+ event_add_timer(master, sr_start_label_manager, area, 1,
+ &srdb->t_start_lm);
return -1;
}
area->area_tag);
/* Disable any re-attempt to connect to Label Manager */
- THREAD_OFF(srdb->t_start_lm);
+ EVENT_OFF(srdb->t_start_lm);
/* Uninstall all local Adjacency-SIDs. */
for (ALL_LIST_ELEMENTS(area->srdb.adj_sids, node, nnode, sra))
bool enabled;
/* Thread timer to start Label Manager */
- struct thread *t_start_lm;
+ struct event *t_start_lm;
/* List of local Adjacency-SIDs. */
struct list *adj_sids;
#include <math.h>
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "stream.h"
#include "memory.h"
struct isis_lsp *lsp;
enum isis_tx_type type;
bool is_retry;
- struct thread *retry;
+ struct event *retry;
struct isis_tx_queue *queue;
};
{
struct isis_tx_queue_entry *e = element;
- THREAD_OFF(e->retry);
+ EVENT_OFF(e->retry);
XFREE(MTYPE_TX_QUEUE_ENTRY, e);
}
void isis_tx_queue_free(struct isis_tx_queue *queue)
{
- hash_clean(queue->hash, tx_queue_element_free);
- hash_free(queue->hash);
+ hash_clean_and_free(&queue->hash, tx_queue_element_free);
XFREE(MTYPE_TX_QUEUE, queue);
}
return hash_lookup(queue->hash, &e);
}
-static void tx_queue_send_event(struct thread *thread)
+static void tx_queue_send_event(struct event *thread)
{
- struct isis_tx_queue_entry *e = THREAD_ARG(thread);
+ struct isis_tx_queue_entry *e = EVENT_ARG(thread);
struct isis_tx_queue *queue = e->queue;
- thread_add_timer(master, tx_queue_send_event, e, 5, &e->retry);
+ event_add_timer(master, tx_queue_send_event, e, 5, &e->retry);
if (e->is_retry)
queue->circuit->area->lsp_rxmt_count++;
e->type = type;
- THREAD_OFF(e->retry);
- thread_add_event(master, tx_queue_send_event, e, 0, &e->retry);
+ EVENT_OFF(e->retry);
+ event_add_event(master, tx_queue_send_event, e, 0, &e->retry);
e->is_retry = false;
}
func, file, line);
}
- THREAD_OFF(e->retry);
+ EVENT_OFF(e->retry);
hash_release(queue->hash, e);
XFREE(MTYPE_TX_QUEUE_ENTRY, e);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "memory.h"
#include "log.h"
[ZEBRA_CLIENT_CLOSE_NOTIFY] = isis_zebra_client_close_notify,
};
-void isis_zebra_init(struct thread_master *master, int instance)
+void isis_zebra_init(struct event_loop *master, int instance)
{
/* Initialize asynchronous zclient. */
zclient = zclient_new(master, &zclient_options_default, isis_handlers,
};
#define CHUNK_SIZE 64
-void isis_zebra_init(struct thread_master *master, int instance);
+void isis_zebra_init(struct event_loop *master, int instance);
void isis_zebra_stop(void);
struct isis_route_info;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "log.h"
struct isis_master *im;
/* ISIS config processing thread */
-struct thread *t_isis_cfg;
+struct event *t_isis_cfg;
#ifndef FABRICD
DEFINE_HOOK(isis_hook_db_overload, (const struct isis_area *area), (area));
return NULL;
}
-void isis_master_init(struct thread_master *master)
+void isis_master_init(struct event_loop *master)
{
memset(&isis_master, 0, sizeof(isis_master));
im = &isis_master;
area->area_addrs->del = delete_area_addr;
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
- thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
+ event_add_timer(master, lsp_tick, area, 1, &area->t_tick);
flags_initialize(&area->flags);
isis_sr_area_init(area);
spftree_area_del(area);
if (area->spf_timer[0])
- isis_spf_timer_free(THREAD_ARG(area->spf_timer[0]));
- THREAD_OFF(area->spf_timer[0]);
+ isis_spf_timer_free(EVENT_ARG(area->spf_timer[0]));
+ EVENT_OFF(area->spf_timer[0]);
if (area->spf_timer[1])
- isis_spf_timer_free(THREAD_ARG(area->spf_timer[1]));
- THREAD_OFF(area->spf_timer[1]);
+ isis_spf_timer_free(EVENT_ARG(area->spf_timer[1]));
+ EVENT_OFF(area->spf_timer[1]);
spf_backoff_free(area->spf_delay_ietf[0]);
spf_backoff_free(area->spf_delay_ietf[1]);
isis_lfa_tiebreakers_clear(area, ISIS_LEVEL1);
isis_lfa_tiebreakers_clear(area, ISIS_LEVEL2);
- THREAD_OFF(area->t_tick);
- THREAD_OFF(area->t_lsp_refresh[0]);
- THREAD_OFF(area->t_lsp_refresh[1]);
- THREAD_OFF(area->t_rlfa_rib_update);
+ EVENT_OFF(area->t_tick);
+ EVENT_OFF(area->t_lsp_refresh[0]);
+ EVENT_OFF(area->t_lsp_refresh[1]);
+ EVENT_OFF(area->t_rlfa_rib_update);
- thread_cancel_event(master, area);
+ event_cancel_event(master, area);
listnode_delete(area->isis->area_list, area);
vty_out(vty, " Level-%d:\n", level);
vty_out(vty, " SPF delay status: ");
if (area->spf_timer[level - 1]) {
- struct timeval remain = thread_timer_remain(
+ struct timeval remain = event_timer_remain(
area->spf_timer[level - 1]);
vty_out(vty, "Pending, due in %lld msec\n",
(long long)remain.tv_sec * 1000
vty_out(vty, " RX counters per PDU type:\n");
pdu_counter_print(vty, " ", area->pdu_rx_counters);
+ vty_out(vty, " Drop counters per PDU type:\n");
+ pdu_counter_print(vty, " ", area->pdu_drop_counters);
+
vty_out(vty, " Advertise high metrics: %s\n",
area->advertise_high_metrics ? "Enabled" : "Disabled");
}
if (area->spf_timer[level - 1])
- isis_spf_timer_free(THREAD_ARG(area->spf_timer[level - 1]));
+ isis_spf_timer_free(EVENT_ARG(area->spf_timer[level - 1]));
- THREAD_OFF(area->spf_timer[level - 1]);
+ EVENT_OFF(area->spf_timer[level - 1]);
sched_debug(
"ISIS (%s): Resigned from L%d - canceling LSP regeneration timer.",
area->area_tag, level);
- THREAD_OFF(area->t_lsp_refresh[level - 1]);
+ EVENT_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
}
} else {
/* Cancel overload on startup timer if it's running */
if (area->t_overload_on_startup_timer) {
- THREAD_OFF(area->t_overload_on_startup_timer);
+ EVENT_OFF(area->t_overload_on_startup_timer);
area->t_overload_on_startup_timer = NULL;
}
}
/* ISIS instance. */
struct list *isis;
/* ISIS thread master. */
- struct thread_master *master;
+ struct event_loop *master;
uint8_t options;
};
#define F_ISIS_UNIT_TEST 0x01
uint8_t max_area_addrs; /* maximumAreaAdresses */
struct area_addr *man_area_addrs; /* manualAreaAddresses */
time_t uptime; /* when did we start */
- struct thread *t_dync_clean; /* dynamic hostname cache cleanup thread */
+ struct event *t_dync_clean; /* dynamic hostname cache cleanup thread */
uint32_t circuit_ids_used[8]; /* 256 bits to track circuit ids 1 through 255 */
int snmp_notifications;
struct list *dyn_cache;
extern struct isis_master *im;
-extern struct thread *t_isis_cfg;
+extern struct event *t_isis_cfg;
enum spf_tree_id {
SPFTREE_IPV4 = 0,
struct list *circuit_list; /* IS-IS circuits */
struct list *adjacency_list; /* IS-IS adjacencies */
struct flags flags;
- struct thread *t_tick; /* LSP walker */
- struct thread *t_lsp_refresh[ISIS_LEVELS];
- struct thread *t_overload_on_startup_timer;
+ struct event *t_tick; /* LSP walker */
+ struct event *t_lsp_refresh[ISIS_LEVELS];
+ struct event *t_overload_on_startup_timer;
struct timeval last_lsp_refresh_event[ISIS_LEVELS];
- struct thread *t_rlfa_rib_update;
+ struct event *t_rlfa_rib_update;
/* t_lsp_refresh is used in two ways:
* a) regular refresh of LSPs
* b) (possibly throttled) updates to LSPs
struct spf_backoff *spf_delay_ietf[ISIS_LEVELS]; /*Structure with IETF
SPF algo
parameters*/
- struct thread *spf_timer[ISIS_LEVELS];
+ struct event *spf_timer[ISIS_LEVELS];
struct lsp_refresh_arg lsp_refresh_arg[ISIS_LEVELS];
pdu_counter_t pdu_tx_counters;
pdu_counter_t pdu_rx_counters;
+ pdu_counter_t pdu_drop_counters;
uint64_t lsp_rxmt_count;
/* Area counters */
DECLARE_HOOK(isis_area_overload_bit_update, (struct isis_area * area), (area));
void isis_terminate(void);
-void isis_master_init(struct thread_master *master);
+void isis_master_init(struct event_loop *master);
void isis_vrf_link(struct isis *isis, struct vrf *vrf);
void isis_vrf_unlink(struct isis *isis, struct vrf *vrf);
struct isis *isis_lookup_by_vrfid(vrf_id_t vrf_id);
#define ISIS_SR "/frr-isisd:isis/instance/segment-routing"
/* Master of threads. */
-extern struct thread_master *master;
+extern struct event_loop *master;
extern unsigned long debug_adj_pkt;
extern unsigned long debug_snp_pkt;
struct accept_ev {
LIST_ENTRY(accept_ev) entry;
- struct thread *ev;
- void (*accept_cb)(struct thread *);
+ struct event *ev;
+ void (*accept_cb)(struct event *);
void *arg;
int fd;
};
struct {
LIST_HEAD(, accept_ev) queue;
- struct thread *evt;
+ struct event *evt;
} accept_queue;
static void accept_arm(void);
static void accept_unarm(void);
-static void accept_cb(struct thread *);
-static void accept_timeout(struct thread *);
+static void accept_cb(struct event *);
+static void accept_timeout(struct event *);
void
accept_init(void)
LIST_INIT(&accept_queue.queue);
}
-int accept_add(int fd, void (*cb)(struct thread *), void *arg)
+int accept_add(int fd, void (*cb)(struct event *), void *arg)
{
struct accept_ev *av;
av->arg = arg;
LIST_INSERT_HEAD(&accept_queue.queue, av, entry);
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
log_debug("%s: accepting on fd %d", __func__, fd);
LIST_FOREACH(av, &accept_queue.queue, entry)
if (av->fd == fd) {
log_debug("%s: %d removed from queue", __func__, fd);
- THREAD_OFF(av->ev);
+ EVENT_OFF(av->ev);
LIST_REMOVE(av, entry);
free(av);
return;
{
log_debug(__func__);
accept_unarm();
- thread_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
+ event_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
}
void
{
if (accept_queue.evt != NULL) {
log_debug(__func__);
- THREAD_OFF(accept_queue.evt);
+ EVENT_OFF(accept_queue.evt);
accept_arm();
}
}
{
struct accept_ev *av;
LIST_FOREACH(av, &accept_queue.queue, entry) {
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
}
}
{
struct accept_ev *av;
LIST_FOREACH(av, &accept_queue.queue, entry)
- THREAD_OFF(av->ev);
+ EVENT_OFF(av->ev);
}
-static void accept_cb(struct thread *thread)
+static void accept_cb(struct event *thread)
{
- struct accept_ev *av = THREAD_ARG(thread);
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ struct accept_ev *av = EVENT_ARG(thread);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
av->accept_cb(thread);
}
-static void accept_timeout(struct thread *thread)
+static void accept_timeout(struct event *thread)
{
accept_queue.evt = NULL;
#include "log.h"
static __inline int adj_compare(const struct adj *, const struct adj *);
-static void adj_itimer(struct thread *);
+static void adj_itimer(struct event *);
static __inline int tnbr_compare(const struct tnbr *, const struct tnbr *);
static void tnbr_del(struct ldpd_conf *, struct tnbr *);
static void tnbr_start(struct tnbr *);
static void tnbr_stop(struct tnbr *);
-static void tnbr_hello_timer(struct thread *);
+static void tnbr_hello_timer(struct event *);
static void tnbr_start_hello_timer(struct tnbr *);
static void tnbr_stop_hello_timer(struct tnbr *);
/* adjacency timers */
/* ARGSUSED */
-static void adj_itimer(struct thread *thread)
+static void adj_itimer(struct event *thread)
{
- struct adj *adj = THREAD_ARG(thread);
+ struct adj *adj = EVENT_ARG(thread);
adj->inactivity_timer = NULL;
void
adj_start_itimer(struct adj *adj)
{
- THREAD_OFF(adj->inactivity_timer);
+ EVENT_OFF(adj->inactivity_timer);
adj->inactivity_timer = NULL;
- thread_add_timer(master, adj_itimer, adj, adj->holdtime,
- &adj->inactivity_timer);
+ event_add_timer(master, adj_itimer, adj, adj->holdtime,
+ &adj->inactivity_timer);
}
void
adj_stop_itimer(struct adj *adj)
{
- THREAD_OFF(adj->inactivity_timer);
+ EVENT_OFF(adj->inactivity_timer);
}
/* targeted neighbors */
/* target neighbors timers */
/* ARGSUSED */
-static void tnbr_hello_timer(struct thread *thread)
+static void tnbr_hello_timer(struct event *thread)
{
- struct tnbr *tnbr = THREAD_ARG(thread);
+ struct tnbr *tnbr = EVENT_ARG(thread);
tnbr->hello_timer = NULL;
send_hello(HELLO_TARGETED, NULL, tnbr);
static void
tnbr_start_hello_timer(struct tnbr *tnbr)
{
- THREAD_OFF(tnbr->hello_timer);
+ EVENT_OFF(tnbr->hello_timer);
tnbr->hello_timer = NULL;
- thread_add_timer(master, tnbr_hello_timer, tnbr, tnbr_get_hello_interval(tnbr),
- &tnbr->hello_timer);
+ event_add_timer(master, tnbr_hello_timer, tnbr,
+ tnbr_get_hello_interval(tnbr), &tnbr->hello_timer);
}
static void
tnbr_stop_hello_timer(struct tnbr *tnbr)
{
- THREAD_OFF(tnbr->hello_timer);
+ EVENT_OFF(tnbr->hello_timer);
}
struct ctl_adj *
}
actl.holdtime = adj->holdtime;
actl.holdtime_remaining =
- thread_timer_remain_second(adj->inactivity_timer);
+ event_timer_remain_second(adj->inactivity_timer);
actl.trans_addr = adj->trans_addr;
actl.ds_tlv = adj->ds_tlv;
#define CONTROL_BACKLOG 5
-static void control_accept(struct thread *);
+static void control_accept(struct event *);
static struct ctl_conn *control_connbyfd(int);
static struct ctl_conn *control_connbypid(pid_t);
static void control_close(int);
-static void control_dispatch_imsg(struct thread *);
+static void control_dispatch_imsg(struct event *);
struct ctl_conns ctl_conns;
}
/* ARGSUSED */
-static void control_accept(struct thread *thread)
+static void control_accept(struct event *thread)
{
int connfd;
socklen_t len;
struct ctl_conn *c;
len = sizeof(s_un);
- if ((connfd = accept(THREAD_FD(thread), (struct sockaddr *)&s_un,
- &len)) == -1) {
+ if ((connfd = accept(EVENT_FD(thread), (struct sockaddr *)&s_un,
+ &len)) == -1) {
/*
* Pause accept if we are out of file descriptors, or
* libevent will haunt us here too.
imsg_init(&c->iev.ibuf, connfd);
c->iev.handler_read = control_dispatch_imsg;
c->iev.ev_read = NULL;
- thread_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
- &c->iev.ev_read);
+ event_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
+ &c->iev.ev_read);
c->iev.handler_write = ldp_write_handler;
c->iev.ev_write = NULL;
msgbuf_clear(&c->iev.ibuf.w);
TAILQ_REMOVE(&ctl_conns, c, entry);
- THREAD_OFF(c->iev.ev_read);
- THREAD_OFF(c->iev.ev_write);
+ EVENT_OFF(c->iev.ev_read);
+ EVENT_OFF(c->iev.ev_write);
close(c->iev.ibuf.fd);
accept_unpause();
free(c);
}
/* ARGSUSED */
-static void control_dispatch_imsg(struct thread *thread)
+static void control_dispatch_imsg(struct event *thread)
{
- int fd = THREAD_FD(thread);
+ int fd = EVENT_FD(thread);
struct ctl_conn *c;
struct imsg imsg;
ssize_t n;
static int if_start(struct iface *, int);
static int if_reset(struct iface *, int);
static void if_update_af(struct iface_af *);
-static void if_hello_timer(struct thread *thread);
+static void if_hello_timer(struct event *thread);
static void if_start_hello_timer(struct iface_af *);
static void if_stop_hello_timer(struct iface_af *);
static int if_join_ipv4_group(struct iface *, struct in_addr *);
static int ldp_sync_fsm_init(struct iface *iface, int state);
static int ldp_sync_act_iface_start_sync(struct iface *iface);
-static void iface_wait_for_ldp_sync_timer(struct thread *thread);
+static void iface_wait_for_ldp_sync_timer(struct event *thread);
static void start_wait_for_ldp_sync_timer(struct iface *iface);
static void stop_wait_for_ldp_sync_timer(struct iface *iface);
static int ldp_sync_act_ldp_start_sync(struct iface *iface);
/* timers */
/* ARGSUSED */
-static void if_hello_timer(struct thread *thread)
+static void if_hello_timer(struct event *thread)
{
- struct iface_af *ia = THREAD_ARG(thread);
+ struct iface_af *ia = EVENT_ARG(thread);
ia->hello_timer = NULL;
send_hello(HELLO_LINK, ia, NULL);
static void
if_start_hello_timer(struct iface_af *ia)
{
- THREAD_OFF(ia->hello_timer);
- thread_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
- &ia->hello_timer);
+ EVENT_OFF(ia->hello_timer);
+ event_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
+ &ia->hello_timer);
}
static void
if_stop_hello_timer(struct iface_af *ia)
{
- THREAD_OFF(ia->hello_timer);
+ EVENT_OFF(ia->hello_timer);
}
struct ctl_iface *
ictl.timer_running = iface->ldp_sync.wait_for_sync_timer ? true : false;
ictl.wait_time_remaining =
- thread_timer_remain_second(iface->ldp_sync.wait_for_sync_timer);
+ event_timer_remain_second(iface->ldp_sync.wait_for_sync_timer);
memset(&ictl.peer_ldp_id, 0, sizeof(ictl.peer_ldp_id));
return (0);
}
-static void iface_wait_for_ldp_sync_timer(struct thread *thread)
+static void iface_wait_for_ldp_sync_timer(struct event *thread)
{
- struct iface *iface = THREAD_ARG(thread);
+ struct iface *iface = EVENT_ARG(thread);
ldp_sync_fsm(iface, LDP_SYNC_EVT_LDP_SYNC_COMPLETE);
}
if (iface->ldp_sync.wait_for_sync_timer)
return;
- THREAD_OFF(iface->ldp_sync.wait_for_sync_timer);
- thread_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
+ EVENT_OFF(iface->ldp_sync.wait_for_sync_timer);
+ event_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
if_get_wait_for_sync_interval(),
&iface->ldp_sync.wait_for_sync_timer);
}
static void stop_wait_for_ldp_sync_timer(struct iface *iface)
{
- THREAD_OFF(iface->ldp_sync.wait_for_sync_timer);
+ EVENT_OFF(iface->ldp_sync.wait_for_sync_timer);
}
static int
#include "libfrr.h"
static void lde_shutdown(void);
-static void lde_dispatch_imsg(struct thread *thread);
-static void lde_dispatch_parent(struct thread *thread);
+static void lde_dispatch_imsg(struct event *thread);
+static void lde_dispatch_parent(struct event *thread);
static __inline int lde_nbr_compare(const struct lde_nbr *,
const struct lde_nbr *);
static struct lde_nbr *lde_nbr_new(uint32_t, struct lde_nbr *);
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = lde_dispatch_parent;
- thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
- &iev_main->ev_read);
+ event_add_read(master, iev_main->handler_read, iev_main,
+ iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;
memset(&iev_main_sync_data, 0, sizeof(iev_main_sync_data));
/* create base configuration */
ldeconf = config_new_empty();
- struct thread thread;
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ struct event thread;
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* NOTREACHED */
return;
}
/* ARGSUSED */
-static void lde_dispatch_imsg(struct thread *thread)
+static void lde_dispatch_imsg(struct event *thread)
{
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
struct imsg imsg;
struct lde_nbr *ln;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
lde_shutdown();
}
}
/* ARGSUSED */
-static void lde_dispatch_parent(struct thread *thread)
+static void lde_dispatch_parent(struct event *thread)
{
static struct ldpd_conf *nconf;
struct iface *iface, *niface;
struct kif *kif;
struct kroute *kr;
int fd;
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
ssize_t n;
int shut = 0;
fatal(NULL);
imsg_init(&iev_ldpe->ibuf, fd);
iev_ldpe->handler_read = lde_dispatch_imsg;
- thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
- &iev_ldpe->ev_read);
+ event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
+ iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;
iev_ldpe->ev_write = NULL;
break;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
lde_shutdown();
}
}
/*
* Event callback used to retry the label-manager sync zapi session.
*/
-static void zclient_sync_retry(struct thread *thread)
+static void zclient_sync_retry(struct event *thread)
{
zclient_sync_init();
}
zclient_sync = NULL;
/* Retry using a timer */
- thread_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
+ event_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
}
static void
extern struct ldpd_conf *ldeconf;
extern struct fec_tree ft;
extern struct nbr_tree lde_nbrs;
-extern struct thread *gc_timer;
+extern struct event *gc_timer;
/* lde.c */
void lde(void);
void lde_check_withdraw_wcard(struct map *, struct lde_nbr *);
int lde_wildcard_apply(struct map *, struct fec *,
struct lde_map *);
-void lde_gc_timer(struct thread *thread);
+void lde_gc_timer(struct event *thread);
void lde_gc_start_timer(void);
void lde_gc_stop_timer(void);
RB_GENERATE(fec_tree, fec, entry, fec_compare)
struct fec_tree ft = RB_INITIALIZER(&ft);
-struct thread *gc_timer;
+struct event *gc_timer;
/* FEC tree functions */
void
/* gabage collector timer: timer to remove dead entries from the LIB */
/* ARGSUSED */
-void lde_gc_timer(struct thread *thread)
+void lde_gc_timer(struct event *thread)
{
struct fec *fec, *safe;
struct fec_node *fn;
void
lde_gc_start_timer(void)
{
- THREAD_OFF(gc_timer);
- thread_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL,
- &gc_timer);
+ EVENT_OFF(gc_timer);
+ event_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL, &gc_timer);
}
void
lde_gc_stop_timer(void)
{
- THREAD_OFF(gc_timer);
+ EVENT_OFF(gc_timer);
}
return 0;
}
-static int ldp_snmp_init(struct thread_master *tm)
+static int ldp_snmp_init(struct event_loop *tm)
{
hook_register(agentx_enabled, ldp_snmp_agentx_enabled);
return 0;
}
-static int ldp_snmp_register_mib(struct thread_master *tm)
+static int ldp_snmp_register_mib(struct event_loop *tm)
{
static int registered = 0;
zpw->nexthop.ipv6 = pw->addr.v6;
zpw->local_label = NO_LABEL;
zpw->remote_label = NO_LABEL;
- if (pw->flags & F_PW_CWORD)
+ if (CHECK_FLAG(pw->flags, F_PW_CWORD))
zpw->flags = F_PSEUDOWIRE_CWORD;
zpw->data.ldp.lsr_id = pw->lsr_id;
zpw->data.ldp.pwid = pw->pwid;
struct zapi_rlfa_igp igp;
struct zapi_rlfa_request rlfa;
- s = zclient->ibuf;
+ s = zclient->ibuf;
- if (zclient_opaque_decode(s, &info) != 0)
- return -1;
+	if (zclient_opaque_decode(s, &info) != 0)
+ return -1;
switch (info.type) {
case LDP_IGP_SYNC_IF_STATE_REQUEST:
* dropping them).
*/
if (kr->remote_label == NO_LABEL
- && !(ldpd_conf->flags & F_LDPD_ALLOW_BROKEN_LSP)
+ && !CHECK_FLAG(ldpd_conf->flags, F_LDPD_ALLOW_BROKEN_LSP)
&& cmd == ZEBRA_MPLS_LABELS_ADD)
return 0;
for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, ifc)) {
ifc2kaddr(ifp, ifc, &ka);
- main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka,
- sizeof(ka));
+ main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka, sizeof(ka));
}
}
}
if (if_is_operative(ifp)) {
for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
ifc2kaddr(ifp, ifc, &ka);
- main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka,
- sizeof(ka));
+ main_imsg_compose_ldpe(IMSG_NEWADDR, 0, &ka, sizeof(ka));
}
} else {
for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
ifc2kaddr(ifp, ifc, &ka);
- main_imsg_compose_ldpe(IMSG_DELADDR, 0, &ka,
- sizeof(ka));
+ main_imsg_compose_ldpe(IMSG_DELADDR, 0, &ka, sizeof(ka));
}
}
switch (api.type) {
case ZEBRA_ROUTE_CONNECT:
- kr.flags |= F_CONNECTED;
+ SET_FLAG(kr.flags, F_CONNECTED);
break;
case ZEBRA_ROUTE_BGP:
/* LDP should follow the IGP and ignore BGP routes */
kr.ifindex = api_nh->ifindex;
break;
case NEXTHOP_TYPE_IFINDEX:
- if (!(kr.flags & F_CONNECTED))
+ if (!CHECK_FLAG(kr.flags, F_CONNECTED))
continue;
break;
case NEXTHOP_TYPE_BLACKHOLE:
zebra_route_string(api.type));
if (add)
- main_imsg_compose_lde(IMSG_NETWORK_ADD, 0, &kr,
- sizeof(kr));
+ main_imsg_compose_lde(IMSG_NETWORK_ADD, 0, &kr, sizeof(kr));
}
main_imsg_compose_lde(IMSG_NETWORK_UPDATE, 0, &kr, sizeof(kr));
/* if MPLS was already enabled and we are re-connecting, register again
*/
- if (vty_conf->flags & F_LDPD_ENABLED)
+ if (CHECK_FLAG(vty_conf->flags, F_LDPD_ENABLED))
ldp_zebra_regdereg_zebra_info(true);
ldp_zebra_opaque_register();
if (access && access->name[0] != '\0') {
strlcpy(laccess.name, access->name, sizeof(laccess.name));
- debug_evt("%s ACL update filter name %s", __func__,
- access->name);
+ debug_evt("%s ACL update filter name %s", __func__, access->name);
- main_imsg_compose_both(IMSG_FILTER_UPDATE, &laccess,
- sizeof(laccess));
+ main_imsg_compose_both(IMSG_FILTER_UPDATE, &laccess, sizeof(laccess));
}
}
[ZEBRA_OPAQUE_MESSAGE] = ldp_zebra_opaque_msg_handler,
};
-void
-ldp_zebra_init(struct thread_master *master)
+void ldp_zebra_init(struct event_loop *master)
{
- if_zapi_callbacks(ldp_ifp_create, ldp_ifp_up,
- ldp_ifp_down, ldp_ifp_destroy);
+ if_zapi_callbacks(ldp_ifp_create, ldp_ifp_up, ldp_ifp_down, ldp_ifp_destroy);
/* Set default values. */
zclient = zclient_new(master, &zclient_options_default, ldp_handlers,
static void ldpd_shutdown(void);
static pid_t start_child(enum ldpd_process, char *, int, int);
-static void main_dispatch_ldpe(struct thread *thread);
-static void main_dispatch_lde(struct thread *thread);
+static void main_dispatch_ldpe(struct event *thread);
+static void main_dispatch_lde(struct event *thread);
static int main_imsg_send_ipc_sockets(struct imsgbuf *,
struct imsgbuf *);
static void main_imsg_send_net_sockets(int);
static struct frr_daemon_info ldpd_di;
-DEFINE_HOOK(ldp_register_mib, (struct thread_master * tm), (tm));
+DEFINE_HOOK(ldp_register_mib, (struct event_loop * tm), (tm));
static void ldp_load_module(const char *name)
{
#define LDP_VTY_PORT 2612
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* ldpd privileges */
static zebra_capabilities_t _caps_p [] =
.n_yang_modules = array_size(ldpd_yang_modules),
);
-static void ldp_config_fork_apply(struct thread *t)
+static void ldp_config_fork_apply(struct event *t)
{
/*
* So the frr_config_fork() function schedules
frr_config_fork();
/* apply configuration */
- thread_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);
+ event_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);
/* setup pipes to children */
if ((iev_ldpe = calloc(1, sizeof(struct imsgev))) == NULL ||
imsg_init(&iev_ldpe->ibuf, pipe_parent2ldpe[0]);
iev_ldpe->handler_read = main_dispatch_ldpe;
- thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
- &iev_ldpe->ev_read);
+ event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
+ iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;
imsg_init(&iev_ldpe_sync->ibuf, pipe_parent2ldpe_sync[0]);
iev_ldpe_sync->handler_read = main_dispatch_ldpe;
- thread_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync, iev_ldpe_sync->ibuf.fd,
- &iev_ldpe_sync->ev_read);
+ event_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync,
+ iev_ldpe_sync->ibuf.fd, &iev_ldpe_sync->ev_read);
iev_ldpe_sync->handler_write = ldp_write_handler;
imsg_init(&iev_lde->ibuf, pipe_parent2lde[0]);
iev_lde->handler_read = main_dispatch_lde;
- thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
- &iev_lde->ev_read);
+ event_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
+ &iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;
imsg_init(&iev_lde_sync->ibuf, pipe_parent2lde_sync[0]);
iev_lde_sync->handler_read = main_dispatch_lde;
- thread_add_read(master, iev_lde_sync->handler_read, iev_lde_sync, iev_lde_sync->ibuf.fd,
- &iev_lde_sync->ev_read);
+ event_add_read(master, iev_lde_sync->handler_read, iev_lde_sync,
+ iev_lde_sync->ibuf.fd, &iev_lde_sync->ev_read);
iev_lde_sync->handler_write = ldp_write_handler;
if (main_imsg_send_ipc_sockets(&iev_ldpe->ibuf, &iev_lde->ibuf))
/* imsg handling */
/* ARGSUSED */
-static void main_dispatch_ldpe(struct thread *thread)
+static void main_dispatch_ldpe(struct event *thread)
{
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
struct imsg imsg;
int af;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
ldpe_pid = 0;
if (lde_pid == 0)
}
/* ARGSUSED */
-static void main_dispatch_lde(struct thread *thread)
+static void main_dispatch_lde(struct event *thread)
{
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
struct imsg imsg;
ssize_t n;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
lde_pid = 0;
if (ldpe_pid == 0)
ldpd_shutdown();
}
/* ARGSUSED */
-void ldp_write_handler(struct thread *thread)
+void ldp_write_handler(struct event *thread)
{
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
ssize_t n;
fatal("msgbuf_write");
if (n == 0) {
/* this pipe is dead, so remove the event handlers */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
return;
}
imsg_event_add(struct imsgev *iev)
{
if (iev->handler_read)
- thread_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
- &iev->ev_read);
+ event_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
+ &iev->ev_read);
if (iev->handler_write && iev->ibuf.w.queued)
- thread_add_write(master, iev->handler_write, iev,
- iev->ibuf.fd, &iev->ev_write);
+ event_add_write(master, iev->handler_write, iev, iev->ibuf.fd,
+ &iev->ev_write);
}
int
evbuf_event_add(struct evbuf *eb)
{
if (eb->wbuf.queued)
- thread_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
- &eb->ev);
+ event_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
+ &eb->ev);
}
-void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct thread *),
+void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct event *),
void *arg)
{
msgbuf_init(&eb->wbuf);
void
evbuf_clear(struct evbuf *eb)
{
- THREAD_OFF(eb->ev);
+ EVENT_OFF(eb->ev);
msgbuf_clear(&eb->wbuf);
eb->wbuf.fd = -1;
}
#include "queue.h"
#include "openbsd-tree.h"
#include "imsg.h"
-#include "thread.h"
+#include "frrevent.h"
#include "qobj.h"
#include "prefix.h"
#include "filter.h"
struct evbuf {
struct msgbuf wbuf;
- struct thread *ev;
- void (*handler)(struct thread *);
+ struct event *ev;
+ void (*handler)(struct event *);
void *arg;
};
struct imsgev {
struct imsgbuf ibuf;
- void (*handler_write)(struct thread *);
- struct thread *ev_write;
- void (*handler_read)(struct thread *);
- struct thread *ev_read;
+ void (*handler_write)(struct event *);
+ struct event *ev_write;
+ void (*handler_read)(struct event *);
+ struct event *ev_read;
};
enum imsg_type {
int state;
struct ia_adj_head adj_tree;
time_t uptime;
- struct thread *hello_timer;
+ struct event *hello_timer;
uint16_t hello_holdtime;
uint16_t hello_interval;
};
struct iface_ldp_sync {
int state;
- struct thread *wait_for_sync_timer;
+ struct event *wait_for_sync_timer;
};
struct iface {
/* source of targeted hellos */
struct tnbr {
RB_ENTRY(tnbr) entry;
- struct thread *hello_timer;
+ struct event *hello_timer;
struct adj *adj;
int af;
union ldpd_addr addr;
#define F_LDPD_ALLOW_BROKEN_LSP 0x0010
struct ldpd_af_global {
- struct thread *disc_ev;
- struct thread *edisc_ev;
+ struct event *disc_ev;
+ struct event *edisc_ev;
int ldp_disc_socket;
int ldp_edisc_socket;
int ldp_session_socket;
socklen_t sockaddr_len(struct sockaddr *);
/* ldpd.c */
-void ldp_write_handler(struct thread *thread);
+void ldp_write_handler(struct event *thread);
void main_imsg_compose_ldpe(int, pid_t, void *, uint16_t);
void main_imsg_compose_lde(int, pid_t, void *, uint16_t);
int main_imsg_compose_both(enum imsg_type, void *,
pid_t, int, void *, uint16_t);
void evbuf_enqueue(struct evbuf *, struct ibuf *);
void evbuf_event_add(struct evbuf *);
-void evbuf_init(struct evbuf *, int, void (*)(struct thread *), void *);
+void evbuf_init(struct evbuf *, int, void (*)(struct event *), void *);
void evbuf_clear(struct evbuf *);
int ldp_acl_request(struct imsgev *, char *, int,
union ldpd_addr *, uint8_t);
const char *pw_error_code(uint8_t);
/* quagga */
-extern struct thread_master *master;
+extern struct event_loop *master;
extern char ctl_sock_path[MAXPATHLEN];
/* ldp_zebra.c */
-void ldp_zebra_init(struct thread_master *);
+void ldp_zebra_init(struct event_loop *m);
void ldp_zebra_destroy(void);
int ldp_sync_zebra_send_state_update(struct ldp_igp_sync_if_state *);
int ldp_zebra_send_rlfa_labels(struct zapi_rlfa_response *
(__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_INTFACELOCAL))
#endif
-DECLARE_HOOK(ldp_register_mib, (struct thread_master * tm), (tm));
+DECLARE_HOOK(ldp_register_mib, (struct event_loop * tm), (tm));
extern void ldp_agentx_enabled(void);
#include "libfrr.h"
static void ldpe_shutdown(void);
-static void ldpe_dispatch_main(struct thread *thread);
-static void ldpe_dispatch_lde(struct thread *thread);
+static void ldpe_dispatch_main(struct event *thread);
+static void ldpe_dispatch_lde(struct event *thread);
#ifdef __OpenBSD__
-static void ldpe_dispatch_pfkey(struct thread *thread);
+static void ldpe_dispatch_pfkey(struct event *thread);
#endif
static void ldpe_setup_sockets(int, int, int, int);
static void ldpe_close_sockets(int);
static struct imsgev *iev_main, *iev_main_sync;
static struct imsgev *iev_lde;
#ifdef __OpenBSD__
-static struct thread *pfkey_ev;
+static struct event *pfkey_ev;
#endif
/* ldpe privileges */
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = ldpe_dispatch_main;
- thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
- &iev_main->ev_read);
+ event_add_read(master, iev_main->handler_read, iev_main,
+ iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;
memset(&iev_main_data, 0, sizeof(iev_main_data));
/* create base configuration */
leconf = config_new_empty();
- struct thread thread;
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ struct event thread;
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* NOTREACHED */
return;
/* This socket must be open before dropping privileges. */
global.pfkeysock = pfkey_init();
if (sysdep.no_pfkey == 0) {
- thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
- &pfkey_ev);
+ event_add_read(master, ldpe_dispatch_pfkey, NULL,
+ global.pfkeysock, &pfkey_ev);
}
#endif
#ifdef __OpenBSD__
if (sysdep.no_pfkey == 0) {
- THREAD_OFF(pfkey_ev);
+ EVENT_OFF(pfkey_ev);
close(global.pfkeysock);
}
#endif
}
/* ARGSUSED */
-static void ldpe_dispatch_main(struct thread *thread)
+static void ldpe_dispatch_main(struct event *thread)
{
static struct ldpd_conf *nconf;
struct iface *niface;
struct l2vpn_pw *pw, *npw;
struct imsg imsg;
int fd;
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
struct iface *iface = NULL;
struct kif *kif;
fatal(NULL);
imsg_init(&iev_lde->ibuf, fd);
iev_lde->handler_read = ldpe_dispatch_lde;
- thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
- &iev_lde->ev_read);
+ event_add_read(master, iev_lde->handler_read, iev_lde,
+ iev_lde->ibuf.fd, &iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;
iev_lde->ev_write = NULL;
break;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
ldpe_shutdown();
}
}
/* ARGSUSED */
-static void ldpe_dispatch_lde(struct thread *thread)
+static void ldpe_dispatch_lde(struct event *thread)
{
- struct imsgev *iev = THREAD_ARG(thread);
+ struct imsgev *iev = EVENT_ARG(thread);
struct imsgbuf *ibuf = &iev->ibuf;
struct imsg imsg;
struct map *map;
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- THREAD_OFF(iev->ev_read);
- THREAD_OFF(iev->ev_write);
+ EVENT_OFF(iev->ev_read);
+ EVENT_OFF(iev->ev_write);
ldpe_shutdown();
}
}
#ifdef __OpenBSD__
/* ARGSUSED */
-static void ldpe_dispatch_pfkey(struct thread *thread)
+static void ldpe_dispatch_pfkey(struct event *thread)
{
- int fd = THREAD_FD(thread);
+ int fd = EVENT_FD(thread);
- thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
- &pfkey_ev);
+ event_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
+ &pfkey_ev);
if (pfkey_read(fd, NULL) == -1)
fatal("pfkey_read failed, exiting...");
/* discovery socket */
af_global->ldp_disc_socket = disc_socket;
- thread_add_read(master, disc_recv_packet, &af_global->disc_ev, af_global->ldp_disc_socket,
- &af_global->disc_ev);
+ event_add_read(master, disc_recv_packet, &af_global->disc_ev,
+ af_global->ldp_disc_socket, &af_global->disc_ev);
/* extended discovery socket */
af_global->ldp_edisc_socket = edisc_socket;
- thread_add_read(master, disc_recv_packet, &af_global->edisc_ev, af_global->ldp_edisc_socket,
- &af_global->edisc_ev);
+ event_add_read(master, disc_recv_packet, &af_global->edisc_ev,
+ af_global->ldp_edisc_socket, &af_global->edisc_ev);
/* session socket */
af_global->ldp_session_socket = session_socket;
af_global = ldp_af_global_get(&global, af);
/* discovery socket */
- THREAD_OFF(af_global->disc_ev);
+ EVENT_OFF(af_global->disc_ev);
if (af_global->ldp_disc_socket != -1) {
close(af_global->ldp_disc_socket);
af_global->ldp_disc_socket = -1;
}
/* extended discovery socket */
- THREAD_OFF(af_global->edisc_ev);
+ EVENT_OFF(af_global->edisc_ev);
if (af_global->ldp_edisc_socket != -1) {
close(af_global->ldp_edisc_socket);
af_global->ldp_edisc_socket = -1;
struct nbr *nbr;
int ds_tlv;
struct hello_source source;
- struct thread *inactivity_timer;
+ struct event *inactivity_timer;
uint16_t holdtime;
union ldpd_addr trans_addr;
};
int fd;
struct ibuf_read *rbuf;
struct evbuf wbuf;
- struct thread *rev;
+ struct event *rev;
in_port_t lport;
in_port_t rport;
};
RB_ENTRY(nbr) id_tree, addr_tree, pid_tree;
struct tcp_conn *tcp;
struct nbr_adj_head adj_tree; /* adjacencies */
- struct thread *ev_connect;
- struct thread *keepalive_timer;
- struct thread *keepalive_timeout;
- struct thread *init_timeout;
- struct thread *initdelay_timer;
+ struct event *ev_connect;
+ struct event *keepalive_timer;
+ struct event *keepalive_timeout;
+ struct event *init_timeout;
+ struct event *initdelay_timer;
struct mapping_head mapping_list;
struct mapping_head withdraw_list;
int fd;
int af;
union ldpd_addr addr;
- struct thread *ev_timeout;
+ struct event *ev_timeout;
};
#define PENDING_CONN_TIMEOUT 5
/* accept.c */
void accept_init(void);
-int accept_add(int, void (*)(struct thread *), void *);
+int accept_add(int, void (*)(struct event *), void *);
void accept_del(int);
void accept_pause(void);
void accept_unpause(void);
int gen_msg_hdr(struct ibuf *, uint16_t, uint16_t);
int send_packet(int, int, union ldpd_addr *,
struct iface_af *, void *, size_t);
-void disc_recv_packet(struct thread *thread);
-void session_accept(struct thread *thread);
+void disc_recv_packet(struct event *thread);
+void session_accept(struct event *thread);
void session_accept_nbr(struct nbr *, int);
void session_shutdown(struct nbr *, uint32_t, uint32_t,
uint32_t);
static __inline int nbr_pid_compare(const struct nbr *,
const struct nbr *);
static void nbr_update_peerid(struct nbr *);
-static void nbr_ktimer(struct thread *thread);
+static void nbr_ktimer(struct event *thread);
static void nbr_start_ktimer(struct nbr *);
-static void nbr_ktimeout(struct thread *thread);
+static void nbr_ktimeout(struct event *thread);
static void nbr_start_ktimeout(struct nbr *);
-static void nbr_itimeout(struct thread *thread);
+static void nbr_itimeout(struct event *thread);
static void nbr_start_itimeout(struct nbr *);
-static void nbr_idtimer(struct thread *thread);
+static void nbr_idtimer(struct event *thread);
static int nbr_act_session_operational(struct nbr *);
static void nbr_send_labelmappings(struct nbr *);
static __inline int nbr_params_compare(const struct nbr_params *,
nbr->auth.method = AUTH_NONE;
if (nbr_pending_connect(nbr))
- THREAD_OFF(nbr->ev_connect);
+ EVENT_OFF(nbr->ev_connect);
nbr_stop_ktimer(nbr);
nbr_stop_ktimeout(nbr);
nbr_stop_itimeout(nbr);
/* Keepalive timer: timer to send keepalive message to neighbors */
-static void nbr_ktimer(struct thread *thread)
+static void nbr_ktimer(struct event *thread)
{
- struct nbr *nbr = THREAD_ARG(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
nbr->keepalive_timer = NULL;
send_keepalive(nbr);
/* send three keepalives per period */
secs = nbr->keepalive / KEEPALIVE_PER_PERIOD;
- THREAD_OFF(nbr->keepalive_timer);
+ EVENT_OFF(nbr->keepalive_timer);
nbr->keepalive_timer = NULL;
- thread_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
+ event_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
}
void
nbr_stop_ktimer(struct nbr *nbr)
{
- THREAD_OFF(nbr->keepalive_timer);
+ EVENT_OFF(nbr->keepalive_timer);
}
/* Keepalive timeout: if the nbr hasn't sent keepalive */
-static void nbr_ktimeout(struct thread *thread)
+static void nbr_ktimeout(struct event *thread)
{
- struct nbr *nbr = THREAD_ARG(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
nbr->keepalive_timeout = NULL;
static void
nbr_start_ktimeout(struct nbr *nbr)
{
- THREAD_OFF(nbr->keepalive_timeout);
+ EVENT_OFF(nbr->keepalive_timeout);
nbr->keepalive_timeout = NULL;
- thread_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
- &nbr->keepalive_timeout);
+ event_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
+ &nbr->keepalive_timeout);
}
void
nbr_stop_ktimeout(struct nbr *nbr)
{
- THREAD_OFF(nbr->keepalive_timeout);
+ EVENT_OFF(nbr->keepalive_timeout);
}
/* Session initialization timeout: if nbr got stuck in the initialization FSM */
-static void nbr_itimeout(struct thread *thread)
+static void nbr_itimeout(struct event *thread)
{
- struct nbr *nbr = THREAD_ARG(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
log_debug("%s: lsr-id %pI4", __func__, &nbr->id);
int secs;
secs = INIT_FSM_TIMEOUT;
- THREAD_OFF(nbr->init_timeout);
+ EVENT_OFF(nbr->init_timeout);
nbr->init_timeout = NULL;
- thread_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
+ event_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
}
void
nbr_stop_itimeout(struct nbr *nbr)
{
- THREAD_OFF(nbr->init_timeout);
+ EVENT_OFF(nbr->init_timeout);
}
/* Init delay timer: timer to retry to iniziatize session */
-static void nbr_idtimer(struct thread *thread)
+static void nbr_idtimer(struct event *thread)
{
- struct nbr *nbr = THREAD_ARG(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
nbr->initdelay_timer = NULL;
break;
}
- THREAD_OFF(nbr->initdelay_timer);
+ EVENT_OFF(nbr->initdelay_timer);
nbr->initdelay_timer = NULL;
- thread_add_timer(master, nbr_idtimer, nbr, secs,
- &nbr->initdelay_timer);
+ event_add_timer(master, nbr_idtimer, nbr, secs, &nbr->initdelay_timer);
}
void
nbr_stop_idtimer(struct nbr *nbr)
{
- THREAD_OFF(nbr->initdelay_timer);
+ EVENT_OFF(nbr->initdelay_timer);
}
int
return (nbr->ev_connect != NULL);
}
-static void nbr_connect_cb(struct thread *thread)
+static void nbr_connect_cb(struct event *thread)
{
- struct nbr *nbr = THREAD_ARG(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
int error;
socklen_t len;
if (connect(nbr->fd, &remote_su.sa, sockaddr_len(&remote_su.sa))
== -1) {
if (errno == EINPROGRESS) {
- thread_add_write(master, nbr_connect_cb, nbr, nbr->fd,
- &nbr->ev_connect);
+ event_add_write(master, nbr_connect_cb, nbr, nbr->fd,
+ &nbr->ev_connect);
return (0);
}
log_warn("%s: error while connecting to %s", __func__,
nctl.flags = nbr->flags;
nctl.max_pdu_len = nbr->max_pdu_len;
nctl.hold_time_remaining =
- thread_timer_remain_second(nbr->keepalive_timer);
+ event_timer_remain_second(nbr->keepalive_timer);
gettimeofday(&now, NULL);
if (nbr->state == NBR_STA_OPER) {
static struct iface *disc_find_iface(unsigned int, int,
union ldpd_addr *);
-static void session_read(struct thread *thread);
-static void session_write(struct thread *thread);
+static void session_read(struct event *thread);
+static void session_write(struct event *thread);
static ssize_t session_get_pdu(struct ibuf_read *, char **);
static void tcp_close(struct tcp_conn *);
static struct pending_conn *pending_conn_new(int, int, union ldpd_addr *);
-static void pending_conn_timeout(struct thread *thread);
+static void pending_conn_timeout(struct event *thread);
int
gen_ldp_hdr(struct ibuf *buf, uint16_t size)
}
/* Discovery functions */
-void disc_recv_packet(struct thread *thread)
+void disc_recv_packet(struct event *thread)
{
- int fd = THREAD_FD(thread);
- struct thread **threadp = THREAD_ARG(thread);
+ int fd = EVENT_FD(thread);
+ struct event **threadp = EVENT_ARG(thread);
union {
struct cmsghdr hdr;
struct in_addr lsr_id;
/* reschedule read */
- thread_add_read(master, disc_recv_packet, threadp, fd, threadp);
+ event_add_read(master, disc_recv_packet, threadp, fd, threadp);
/* setup buffer */
memset(&m, 0, sizeof(m));
return (iface);
}
-void session_accept(struct thread *thread)
+void session_accept(struct event *thread)
{
- int fd = THREAD_FD(thread);
+ int fd = EVENT_FD(thread);
struct sockaddr_storage src;
socklen_t len = sizeof(src);
int newfd;
nbr_fsm(nbr, NBR_EVT_MATCH_ADJ);
}
-static void session_read(struct thread *thread)
+static void session_read(struct event *thread)
{
- int fd = THREAD_FD(thread);
- struct nbr *nbr = THREAD_ARG(thread);
+ int fd = EVENT_FD(thread);
+ struct nbr *nbr = EVENT_ARG(thread);
struct tcp_conn *tcp = nbr->tcp;
struct ldp_hdr *ldp_hdr;
struct ldp_msg *msg;
uint16_t pdu_len, msg_len, msg_size, max_pdu_len;
int ret;
- thread_add_read(master, session_read, nbr, fd, &tcp->rev);
+ event_add_read(master, session_read, nbr, fd, &tcp->rev);
if ((n = read(fd, tcp->rbuf->buf + tcp->rbuf->wpos,
sizeof(tcp->rbuf->buf) - tcp->rbuf->wpos)) == -1) {
free(buf);
}
-static void session_write(struct thread *thread)
+static void session_write(struct event *thread)
{
- struct tcp_conn *tcp = THREAD_ARG(thread);
+ struct tcp_conn *tcp = EVENT_ARG(thread);
struct nbr *nbr = tcp->nbr;
tcp->wbuf.ev = NULL;
switch (nbr->state) {
case NBR_STA_PRESENT:
if (nbr_pending_connect(nbr))
- THREAD_OFF(nbr->ev_connect);
+ EVENT_OFF(nbr->ev_connect);
break;
case NBR_STA_INITIAL:
case NBR_STA_OPENREC:
if ((tcp->rbuf = calloc(1, sizeof(struct ibuf_read))) == NULL)
fatal(__func__);
- thread_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
+ event_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
tcp->nbr = nbr;
}
evbuf_clear(&tcp->wbuf);
if (tcp->nbr) {
- THREAD_OFF(tcp->rev);
+ EVENT_OFF(tcp->rev);
free(tcp->rbuf);
tcp->nbr->tcp = NULL;
}
pconn->addr = *addr;
TAILQ_INSERT_TAIL(&global.pending_conns, pconn, entry);
pconn->ev_timeout = NULL;
- thread_add_timer(master, pending_conn_timeout, pconn, PENDING_CONN_TIMEOUT,
- &pconn->ev_timeout);
+ event_add_timer(master, pending_conn_timeout, pconn,
+ PENDING_CONN_TIMEOUT, &pconn->ev_timeout);
return (pconn);
}
void
pending_conn_del(struct pending_conn *pconn)
{
- THREAD_OFF(pconn->ev_timeout);
+ EVENT_OFF(pconn->ev_timeout);
TAILQ_REMOVE(&global.pending_conns, pconn, entry);
free(pconn);
}
return (NULL);
}
-static void pending_conn_timeout(struct thread *thread)
+static void pending_conn_timeout(struct event *thread)
{
- struct pending_conn *pconn = THREAD_ARG(thread);
+ struct pending_conn *pconn = EVENT_ARG(thread);
struct tcp_conn *tcp;
pconn->ev_timeout = NULL;
static bool agentx_enabled = false;
-static struct thread_master *agentx_tm;
-static struct thread *timeout_thr = NULL;
+static struct event_loop *agentx_tm;
+static struct event *timeout_thr = NULL;
static struct list *events = NULL;
static void agentx_events_update(void);
-static void agentx_timeout(struct thread *t)
+static void agentx_timeout(struct event *t)
{
snmp_timeout();
run_alarms();
agentx_events_update();
}
-static void agentx_read(struct thread *t)
+static void agentx_read(struct event *t)
{
fd_set fds;
int flags, new_flags = 0;
int nonblock = false;
- struct listnode *ln = THREAD_ARG(t);
- struct thread **thr = listgetdata(ln);
+ struct listnode *ln = EVENT_ARG(t);
+ struct event **thr = listgetdata(ln);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
/* fix for non blocking socket */
- flags = fcntl(THREAD_FD(t), F_GETFL, 0);
+ flags = fcntl(EVENT_FD(t), F_GETFL, 0);
if (-1 == flags) {
flog_err(EC_LIB_SYSTEM_CALL, "Failed to get FD settings fcntl: %s(%d)",
strerror(errno), errno);
if (flags & O_NONBLOCK)
nonblock = true;
else
- new_flags = fcntl(THREAD_FD(t), F_SETFL, flags | O_NONBLOCK);
+ new_flags = fcntl(EVENT_FD(t), F_SETFL, flags | O_NONBLOCK);
if (new_flags == -1)
flog_err(EC_LIB_SYSTEM_CALL, "Failed to set snmp fd non blocking: %s(%d)",
strerror(errno), errno);
FD_ZERO(&fds);
- FD_SET(THREAD_FD(t), &fds);
+ FD_SET(EVENT_FD(t), &fds);
snmp_read(&fds);
/* Reset the flag */
if (!nonblock) {
- new_flags = fcntl(THREAD_FD(t), F_SETFL, flags);
+ new_flags = fcntl(EVENT_FD(t), F_SETFL, flags);
if (new_flags == -1)
flog_err(
struct timeval timeout = {.tv_sec = 0, .tv_usec = 0};
fd_set fds;
struct listnode *ln;
- struct thread **thr;
+ struct event **thr;
int fd, thr_fd;
- thread_cancel(&timeout_thr);
+ event_cancel(&timeout_thr);
FD_ZERO(&fds);
snmp_select_info(&maxfd, &fds, &timeout, &block);
if (!block) {
- thread_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
- &timeout_thr);
+ event_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
+ &timeout_thr);
}
ln = listhead(events);
thr = ln ? listgetdata(ln) : NULL;
- thr_fd = thr ? THREAD_FD(*thr) : -1;
+ thr_fd = thr ? EVENT_FD(*thr) : -1;
/* "two-pointer" / two-list simultaneous iteration
* ln/thr/thr_fd point to the next existing event listener to hit while
if (thr_fd == fd) {
struct listnode *nextln = listnextnode(ln);
if (!FD_ISSET(fd, &fds)) {
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
}
ln = nextln;
thr = ln ? listgetdata(ln) : NULL;
- thr_fd = thr ? THREAD_FD(*thr) : -1;
+ thr_fd = thr ? EVENT_FD(*thr) : -1;
}
/* need listener, but haven't hit one where it would be */
else if (FD_ISSET(fd, &fds)) {
struct listnode *newln;
- thr = XCALLOC(MTYPE_TMP, sizeof(struct thread *));
+ thr = XCALLOC(MTYPE_TMP, sizeof(struct event *));
newln = listnode_add_before(events, ln, thr);
- thread_add_read(agentx_tm, agentx_read, newln, fd, thr);
+ event_add_read(agentx_tm, agentx_read, newln, fd, thr);
}
}
while (ln) {
struct listnode *nextln = listnextnode(ln);
thr = listgetdata(ln);
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
ln = nextln;
return agentx_enabled;
}
-void smux_init(struct thread_master *tm)
+void smux_init(struct event_loop *tm)
{
agentx_tm = tm;
#include "command.h"
#include "memory.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "vrf.h"
#include "zclient.h"
* Next event.
*
* This variable controls what action to execute when the command batch
- * finishes. Normally we'd use `thread_add_event` value, however since
+ * finishes. Normally we'd use `event_add_event` value, however since
* that function is going to be called multiple times and the value
* might be different we'll use this variable to keep track of it.
*/
* configuration load or northbound batch), so we'll use this to
* install/uninstall the BFD session parameters only once.
*/
- struct thread *installev;
+ struct event *installev;
/** BFD session installation state. */
bool installed;
struct bfd_source_list source_list;
/** Pointer to FRR's event manager. */
- struct thread_master *tm;
+ struct event_loop *tm;
/** Pointer to zebra client data structure. */
struct zclient *zc;
return true;
}
-static void _bfd_sess_send(struct thread *t)
+static void _bfd_sess_send(struct event *t)
{
- struct bfd_session_params *bsp = THREAD_ARG(t);
+ struct bfd_session_params *bsp = EVENT_ARG(t);
int rv;
/* Validate configuration before trying to send bogus data. */
static void _bfd_sess_remove(struct bfd_session_params *bsp)
{
/* Cancel any pending installation request. */
- THREAD_OFF(bsp->installev);
+ EVENT_OFF(bsp->installev);
/* Not installed, nothing to do. */
if (!bsp->installed)
/* Send request to remove any session. */
bsp->lastev = BSE_UNINSTALL;
- thread_execute(bsglobal.tm, _bfd_sess_send, bsp, 0);
+ event_execute(bsglobal.tm, _bfd_sess_send, bsp, 0);
}
void bfd_sess_free(struct bfd_session_params **bsp)
void bfd_sess_install(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_INSTALL;
- thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
+ event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}
void bfd_sess_uninstall(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_UNINSTALL;
- thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
+ event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}
enum bfd_session_state bfd_sess_status(const struct bfd_session_params *bsp)
bsp->installed = false;
/* Cancel any pending installation request. */
- THREAD_OFF(bsp->installev);
+ EVENT_OFF(bsp->installev);
/* Ask for installation. */
bsp->lastev = BSE_INSTALL;
- thread_execute(bsglobal.tm, _bfd_sess_send, bsp, 0);
+ event_execute(bsglobal.tm, _bfd_sess_send, bsp, 0);
}
return 0;
return 0;
}
-void bfd_protocol_integration_init(struct zclient *zc, struct thread_master *tm)
+void bfd_protocol_integration_init(struct zclient *zc, struct event_loop *tm)
{
/* Initialize data structure. */
TAILQ_INIT(&bsglobal.bsplist);
* Initializes the BFD integration library. This function executes the
* following actions:
*
- * - Copy the `struct thread_master` pointer to use as "thread" to execute
+ * - Copy the `struct event_loop` pointer to use as "thread" to execute
* the BFD session parameters installation.
* - Copy the `struct zclient` pointer to install its callbacks.
* - Initializes internal data structures.
* \param tm normally the daemon main thread event manager.
* \param zc the zebra client of the daemon.
*/
-void bfd_protocol_integration_init(struct zclient *zc,
- struct thread_master *tm);
+void bfd_protocol_integration_init(struct zclient *zc, struct event_loop *tm);
/**
* BFD session registration arguments.
#include "memory.h"
#include "log.h"
#include "log_vty.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vector.h"
#include "linklist.h"
#include "vty.h"
install_default(CONFIG_NODE);
- thread_cmd_init();
+ event_cmd_init();
workqueue_cmd_init();
hash_cmd_init();
}
// well
graph_delete_graph(cmd_node->cmdgraph);
vector_free(cmd_node->cmd_vector);
- hash_clean(cmd_node->cmd_hash, NULL);
- hash_free(cmd_node->cmd_hash);
- cmd_node->cmd_hash = NULL;
+ hash_clean_and_free(&cmd_node->cmd_hash, NULL);
}
vector_free(cmdvec);
#define SHARP_STR "Sharp Routing Protocol\n"
#define OSPF_GR_STR \
"OSPF non-stop forwarding (NSF) also known as OSPF Graceful Restart\n"
+#define MGMTD_STR "Management Daemon (MGMTD) information\n"
+#define MGMTD_BE_ADAPTER_STR "MGMTD Backend Adapter information\n"
+#define MGMTD_FE_ADAPTER_STR "MGMTD Frontend Adapter information\n"
+#define MGMTD_TXN_STR "MGMTD Transaction information\n"
+#define MGMTD_DS_STR "MGMTD Datastore information\n"
#define CMD_VNI_RANGE "(1-16777215)"
#define CONF_BACKUP_EXT ".sav"
char string_end;
char *value;
+static const char *yyfilename;
static void extendbuf(char **what, const char *arg)
{
}
}
<rstring>\\\n /* ignore */
+<rstring>\n {
+ fprintf(stderr,
+ "%s:%d: string continues past the end of the line\n",
+ yyfilename, yylineno);
+ free(value);
+ value = NULL;
+ BEGIN(INITIAL);
+ return STRING;
+ }
<rstring>\\. extend(yytext);
-<rstring>[^\\\"\']+ extend(yytext);
+<rstring>[^\\\"\'\n]+ extend(yytext);
"DEFUN" value = strdup(yytext); return DEFUNNY;
"DEFUN_NOSH" value = strdup(yytext); return DEFUNNY;
if (tval[0] == ')')
depth--;
}
+ if (!tval)
+ return PyErr_Format(PyExc_ValueError,
+ "%s:%d: invalid token in DEFPY parameters",
+ filename, lineno);
if (!pyArg)
pyArg = PyList_New(0);
PyList_Append(pyArg, PyUnicode_FromString(tval));
int token;
yyin = fd;
value = NULL;
+ yyfilename = filename;
PyObject *pyCont = PyDict_New();
PyObject *pyObj = PyList_New(0);
if (!pyArgs) {
free(tval);
Py_DECREF(pyCont);
+ yyfilename = NULL;
return NULL;
}
pyItem = PyDict_New();
}
def_yylex_destroy();
fclose(fd);
+ yyfilename = NULL;
return pyCont;
}
void distribute_list_delete(struct distribute_ctx **ctx)
{
- if ((*ctx)->disthash) {
- hash_clean((*ctx)->disthash, (void (*)(void *))distribute_free);
+ hash_clean_and_free(&(*ctx)->disthash,
+ (void (*)(void *))distribute_free);
+
+ if (dist_ctx_list) {
+ listnode_delete(dist_ctx_list, *ctx);
+ if (list_isempty(dist_ctx_list))
+ list_delete(&dist_ctx_list);
}
- if (!dist_ctx_list)
- dist_ctx_list = list_new();
- listnode_delete(dist_ctx_list, *ctx);
- if (list_isempty(dist_ctx_list))
- list_delete(&dist_ctx_list);
+
XFREE(MTYPE_DISTRIBUTE_CTX, (*ctx));
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Thread management routine
+ * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
+ */
+
+/* #define DEBUG */
+
+#include <zebra.h>
+#include <sys/resource.h>
+
+#include "frrevent.h"
+#include "memory.h"
+#include "frrcu.h"
+#include "log.h"
+#include "hash.h"
+#include "command.h"
+#include "sigevent.h"
+#include "network.h"
+#include "jhash.h"
+#include "frratomic.h"
+#include "frr_pthread.h"
+#include "lib_errors.h"
+#include "libfrr_trace.h"
+#include "libfrr.h"
+
+DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
+DEFINE_MTYPE_STATIC(LIB, EVENT_MASTER, "Thread master");
+DEFINE_MTYPE_STATIC(LIB, EVENT_POLL, "Thread Poll Info");
+DEFINE_MTYPE_STATIC(LIB, EVENT_STATS, "Thread stats");
+
+DECLARE_LIST(event_list, struct event, eventitem);
+
+struct cancel_req {
+ int flags;
+ struct event *thread;
+ void *eventobj;
+ struct event **threadref;
+};
+
+/* Flags for task cancellation */
+#define EVENT_CANCEL_FLAG_READY 0x01
+
+static int event_timer_cmp(const struct event *a, const struct event *b)
+{
+ if (a->u.sands.tv_sec < b->u.sands.tv_sec)
+ return -1;
+ if (a->u.sands.tv_sec > b->u.sands.tv_sec)
+ return 1;
+ if (a->u.sands.tv_usec < b->u.sands.tv_usec)
+ return -1;
+ if (a->u.sands.tv_usec > b->u.sands.tv_usec)
+ return 1;
+ return 0;
+}
+
+DECLARE_HEAP(event_timer_list, struct event, timeritem, event_timer_cmp);
+
+#if defined(__APPLE__)
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#endif
+
+#define AWAKEN(m) \
+ do { \
+ const unsigned char wakebyte = 0x01; \
+ write(m->io_pipe[1], &wakebyte, 1); \
+ } while (0)
+
+/* control variable for initializer */
+static pthread_once_t init_once = PTHREAD_ONCE_INIT;
+pthread_key_t thread_current;
+
+static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
+static struct list *masters;
+
+static void thread_free(struct event_loop *master, struct event *thread);
+
+#ifndef EXCLUDE_CPU_TIME
+#define EXCLUDE_CPU_TIME 0
+#endif
+#ifndef CONSUMED_TIME_CHECK
+#define CONSUMED_TIME_CHECK 0
+#endif
+
+bool cputime_enabled = !EXCLUDE_CPU_TIME;
+unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
+unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
+
+/* CLI start ---------------------------------------------------------------- */
+#include "lib/event_clippy.c"
+
+static unsigned int cpu_record_hash_key(const struct cpu_event_history *a)
+{
+ int size = sizeof(a->func);
+
+ return jhash(&a->func, size, 0);
+}
+
+static bool cpu_record_hash_cmp(const struct cpu_event_history *a,
+ const struct cpu_event_history *b)
+{
+ return a->func == b->func;
+}
+
+static void *cpu_record_hash_alloc(struct cpu_event_history *a)
+{
+ struct cpu_event_history *new;
+
+ new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_event_history));
+ new->func = a->func;
+ new->funcname = a->funcname;
+ return new;
+}
+
+static void cpu_record_hash_free(void *a)
+{
+ struct cpu_event_history *hist = a;
+
+ XFREE(MTYPE_EVENT_STATS, hist);
+}
+
+static void vty_out_cpu_event_history(struct vty *vty,
+ struct cpu_event_history *a)
+{
+ vty_out(vty,
+ "%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
+ a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
+ a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
+ (a->real.total / a->total_calls), a->real.max,
+ a->total_cpu_warn, a->total_wall_warn, a->total_starv_warn);
+ vty_out(vty, " %c%c%c%c%c %s\n",
+ a->types & (1 << EVENT_READ) ? 'R' : ' ',
+ a->types & (1 << EVENT_WRITE) ? 'W' : ' ',
+ a->types & (1 << EVENT_TIMER) ? 'T' : ' ',
+ a->types & (1 << EVENT_EVENT) ? 'E' : ' ',
+ a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
+}
+
+static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
+{
+ struct cpu_event_history *totals = args[0];
+ struct cpu_event_history copy;
+ struct vty *vty = args[1];
+ uint8_t *filter = args[2];
+
+ struct cpu_event_history *a = bucket->data;
+
+ copy.total_active =
+ atomic_load_explicit(&a->total_active, memory_order_seq_cst);
+ copy.total_calls =
+ atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
+ copy.total_cpu_warn =
+ atomic_load_explicit(&a->total_cpu_warn, memory_order_seq_cst);
+ copy.total_wall_warn =
+ atomic_load_explicit(&a->total_wall_warn, memory_order_seq_cst);
+ copy.total_starv_warn = atomic_load_explicit(&a->total_starv_warn,
+ memory_order_seq_cst);
+ copy.cpu.total =
+ atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
+ copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
+ copy.real.total =
+ atomic_load_explicit(&a->real.total, memory_order_seq_cst);
+ copy.real.max =
+ atomic_load_explicit(&a->real.max, memory_order_seq_cst);
+ copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
+ copy.funcname = a->funcname;
+
+ if (!(copy.types & *filter))
+ return;
+
+ vty_out_cpu_event_history(vty, ©);
+ totals->total_active += copy.total_active;
+ totals->total_calls += copy.total_calls;
+ totals->total_cpu_warn += copy.total_cpu_warn;
+ totals->total_wall_warn += copy.total_wall_warn;
+ totals->total_starv_warn += copy.total_starv_warn;
+ totals->real.total += copy.real.total;
+ if (totals->real.max < copy.real.max)
+ totals->real.max = copy.real.max;
+ totals->cpu.total += copy.cpu.total;
+ if (totals->cpu.max < copy.cpu.max)
+ totals->cpu.max = copy.cpu.max;
+}
+
+static void cpu_record_print(struct vty *vty, uint8_t filter)
+{
+ struct cpu_event_history tmp;
+ void *args[3] = {&tmp, vty, &filter};
+ struct event_loop *m;
+ struct listnode *ln;
+
+ if (!cputime_enabled)
+ vty_out(vty,
+ "\n"
+ "Collecting CPU time statistics is currently disabled. Following statistics\n"
+ "will be zero or may display data from when collection was enabled. Use the\n"
+ " \"service cputime-stats\" command to start collecting data.\n"
+ "\nCounters and wallclock times are always maintained and should be accurate.\n");
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.funcname = "TOTAL";
+ tmp.types = filter;
+
+ frr_with_mutex (&masters_mtx) {
+ for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
+ const char *name = m->name ? m->name : "main";
+ char underline[strlen(name) + 1];
+
+ memset(underline, '-', sizeof(underline));
+ underline[sizeof(underline) - 1] = '\0';
+
+ vty_out(vty, "\n");
+ vty_out(vty, "Showing statistics for pthread %s\n",
+ name);
+ vty_out(vty, "-------------------------------%s\n",
+ underline);
+ vty_out(vty, "%30s %18s %18s\n", "",
+ "CPU (user+system):", "Real (wall-clock):");
+ vty_out(vty,
+ "Active Runtime(ms) Invoked Avg uSec Max uSecs");
+ vty_out(vty, " Avg uSec Max uSecs");
+ vty_out(vty,
+ " CPU_Warn Wall_Warn Starv_Warn Type Thread\n");
+
+ if (m->cpu_record->count)
+ hash_iterate(
+ m->cpu_record,
+ (void (*)(struct hash_bucket *,
+ void *))cpu_record_hash_print,
+ args);
+ else
+ vty_out(vty, "No data to display yet.\n");
+
+ vty_out(vty, "\n");
+ }
+ }
+
+ vty_out(vty, "\n");
+ vty_out(vty, "Total thread statistics\n");
+ vty_out(vty, "-------------------------\n");
+ vty_out(vty, "%30s %18s %18s\n", "",
+ "CPU (user+system):", "Real (wall-clock):");
+ vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
+ vty_out(vty, " Avg uSec Max uSecs CPU_Warn Wall_Warn");
+ vty_out(vty, " Type Thread\n");
+
+ if (tmp.total_calls > 0)
+ vty_out_cpu_event_history(vty, &tmp);
+}
+
+static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
+{
+ uint8_t *filter = args[0];
+ struct hash *cpu_record = args[1];
+
+ struct cpu_event_history *a = bucket->data;
+
+ if (!(a->types & *filter))
+ return;
+
+ hash_release(cpu_record, bucket->data);
+}
+
+static void cpu_record_clear(uint8_t filter)
+{
+ uint8_t *tmp = &filter;
+ struct event_loop *m;
+ struct listnode *ln;
+
+ frr_with_mutex (&masters_mtx) {
+ for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
+ frr_with_mutex (&m->mtx) {
+ void *args[2] = {tmp, m->cpu_record};
+
+ hash_iterate(
+ m->cpu_record,
+ (void (*)(struct hash_bucket *,
+ void *))cpu_record_hash_clear,
+ args);
+ }
+ }
+ }
+}
+
+/* Map a filter string (any mix of the characters r/w/t/e/x, case-insensitive)
+ * to a bitmask of EVENT_* type bits. Unrecognized characters are silently
+ * ignored, so a string with no valid characters yields 0 — callers treat a
+ * zero return as "invalid filter".
+ */
+static uint8_t parse_filter(const char *filterstr)
+{
+	int i = 0;
+	int filter = 0;
+
+	while (filterstr[i] != '\0') {
+		switch (filterstr[i]) {
+		case 'r':
+		case 'R':
+			filter |= (1 << EVENT_READ);
+			break;
+		case 'w':
+		case 'W':
+			filter |= (1 << EVENT_WRITE);
+			break;
+		case 't':
+		case 'T':
+			filter |= (1 << EVENT_TIMER);
+			break;
+		case 'e':
+		case 'E':
+			filter |= (1 << EVENT_EVENT);
+			break;
+		case 'x':
+		case 'X':
+			filter |= (1 << EVENT_EXECUTE);
+			break;
+		default:
+			break;
+		}
+		++i;
+	}
+	return filter;
+}
+
+/* "show thread cpu [FILTER]": print per-task CPU/wallclock statistics,
+ * optionally restricted to the task types named by FILTER (r/w/t/e/x).
+ */
+DEFUN_NOSH (show_thread_cpu,
+	    show_thread_cpu_cmd,
+	    "show thread cpu [FILTER]",
+	    SHOW_STR
+	    "Thread information\n"
+	    "Thread CPU usage\n"
+	    "Display filter (rwtex)\n")
+{
+	uint8_t filter = (uint8_t)-1U;
+	int idx = 0;
+
+	if (argv_find(argv, argc, "FILTER", &idx)) {
+		filter = parse_filter(argv[idx]->arg);
+		/* parse_filter() returns 0 when no valid r/w/t/e/x character
+		 * was present; reject rather than silently showing nothing.
+		 */
+		if (!filter) {
+			vty_out(vty,
+				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
+				argv[idx]->arg);
+			return CMD_WARNING;
+		}
+	}
+
+	cpu_record_print(vty, filter);
+	return CMD_SUCCESS;
+}
+
+DEFPY (service_cputime_stats,
+ service_cputime_stats_cmd,
+ "[no] service cputime-stats",
+ NO_STR
+ "Set up miscellaneous service\n"
+ "Collect CPU usage statistics\n")
+{
+ cputime_enabled = !no;
+ return CMD_SUCCESS;
+}
+
+DEFPY (service_cputime_warning,
+ service_cputime_warning_cmd,
+ "[no] service cputime-warning (1-4294967295)",
+ NO_STR
+ "Set up miscellaneous service\n"
+ "Warn for tasks exceeding CPU usage threshold\n"
+ "Warning threshold in milliseconds\n")
+{
+ if (no)
+ cputime_threshold = 0;
+ else
+ cputime_threshold = cputime_warning * 1000;
+ return CMD_SUCCESS;
+}
+
+ALIAS (service_cputime_warning,
+ no_service_cputime_warning_cmd,
+ "no service cputime-warning",
+ NO_STR
+ "Set up miscellaneous service\n"
+ "Warn for tasks exceeding CPU usage threshold\n")
+
+DEFPY (service_walltime_warning,
+ service_walltime_warning_cmd,
+ "[no] service walltime-warning (1-4294967295)",
+ NO_STR
+ "Set up miscellaneous service\n"
+ "Warn for tasks exceeding total wallclock threshold\n"
+ "Warning threshold in milliseconds\n")
+{
+ if (no)
+ walltime_threshold = 0;
+ else
+ walltime_threshold = walltime_warning * 1000;
+ return CMD_SUCCESS;
+}
+
+ALIAS (service_walltime_warning,
+ no_service_walltime_warning_cmd,
+ "no service walltime-warning",
+ NO_STR
+ "Set up miscellaneous service\n"
+ "Warn for tasks exceeding total wallclock threshold\n")
+
+static void show_thread_poll_helper(struct vty *vty, struct event_loop *m)
+{
+ const char *name = m->name ? m->name : "main";
+ char underline[strlen(name) + 1];
+ struct event *thread;
+ uint32_t i;
+
+ memset(underline, '-', sizeof(underline));
+ underline[sizeof(underline) - 1] = '\0';
+
+ vty_out(vty, "\nShowing poll FD's for %s\n", name);
+ vty_out(vty, "----------------------%s\n", underline);
+ vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
+ m->fd_limit);
+ for (i = 0; i < m->handler.pfdcount; i++) {
+ vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
+ m->handler.pfds[i].fd, m->handler.pfds[i].events,
+ m->handler.pfds[i].revents);
+
+ if (m->handler.pfds[i].events & POLLIN) {
+ thread = m->read[m->handler.pfds[i].fd];
+
+ if (!thread)
+ vty_out(vty, "ERROR ");
+ else
+ vty_out(vty, "%s ", thread->xref->funcname);
+ } else
+ vty_out(vty, " ");
+
+ if (m->handler.pfds[i].events & POLLOUT) {
+ thread = m->write[m->handler.pfds[i].fd];
+
+ if (!thread)
+ vty_out(vty, "ERROR\n");
+ else
+ vty_out(vty, "%s\n", thread->xref->funcname);
+ } else
+ vty_out(vty, "\n");
+ }
+}
+
+DEFUN_NOSH (show_thread_poll,
+ show_thread_poll_cmd,
+ "show thread poll",
+ SHOW_STR
+ "Thread information\n"
+ "Show poll FD's and information\n")
+{
+ struct listnode *node;
+ struct event_loop *m;
+
+ frr_with_mutex (&masters_mtx) {
+ for (ALL_LIST_ELEMENTS_RO(masters, node, m))
+ show_thread_poll_helper(vty, m);
+ }
+
+ return CMD_SUCCESS;
+}
+
+
+/* "clear thread cpu [FILTER]": reset collected per-task CPU statistics,
+ * optionally restricted to the task types named by FILTER (r/w/t/e/x).
+ */
+DEFUN (clear_thread_cpu,
+       clear_thread_cpu_cmd,
+       "clear thread cpu [FILTER]",
+       "Clear stored data in all pthreads\n"
+       "Thread information\n"
+       "Thread CPU usage\n"
+       "Display filter (rwtex)\n")
+{
+	uint8_t filter = (uint8_t)-1U;
+	int idx = 0;
+
+	if (argv_find(argv, argc, "FILTER", &idx)) {
+		filter = parse_filter(argv[idx]->arg);
+		/* parse_filter() returns 0 when no valid r/w/t/e/x character
+		 * was present; reject rather than silently clearing nothing.
+		 */
+		if (!filter) {
+			vty_out(vty,
+				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEX'\n",
+				argv[idx]->arg);
+			return CMD_WARNING;
+		}
+	}
+
+	cpu_record_clear(filter);
+	return CMD_SUCCESS;
+}
+
+static void show_thread_timers_helper(struct vty *vty, struct event_loop *m)
+{
+ const char *name = m->name ? m->name : "main";
+ char underline[strlen(name) + 1];
+ struct event *thread;
+
+ memset(underline, '-', sizeof(underline));
+ underline[sizeof(underline) - 1] = '\0';
+
+ vty_out(vty, "\nShowing timers for %s\n", name);
+ vty_out(vty, "-------------------%s\n", underline);
+
+ frr_each (event_timer_list, &m->timer, thread) {
+ vty_out(vty, " %-50s%pTH\n", thread->hist->funcname, thread);
+ }
+}
+
+DEFPY_NOSH (show_thread_timers,
+ show_thread_timers_cmd,
+ "show thread timers",
+ SHOW_STR
+ "Thread information\n"
+ "Show all timers and how long they have in the system\n")
+{
+ struct listnode *node;
+ struct event_loop *m;
+
+ frr_with_mutex (&masters_mtx) {
+ for (ALL_LIST_ELEMENTS_RO(masters, node, m))
+ show_thread_timers_helper(vty, m);
+ }
+
+ return CMD_SUCCESS;
+}
+
+void event_cmd_init(void)
+{
+ install_element(VIEW_NODE, &show_thread_cpu_cmd);
+ install_element(VIEW_NODE, &show_thread_poll_cmd);
+ install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
+
+ install_element(CONFIG_NODE, &service_cputime_stats_cmd);
+ install_element(CONFIG_NODE, &service_cputime_warning_cmd);
+ install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
+ install_element(CONFIG_NODE, &service_walltime_warning_cmd);
+ install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);
+
+ install_element(VIEW_NODE, &show_thread_timers_cmd);
+}
+/* CLI end ------------------------------------------------------------------ */
+
+
+static void cancelreq_del(void *cr)
+{
+ XFREE(MTYPE_TMP, cr);
+}
+
+/* initializer, only ever called once */
+static void initializer(void)
+{
+ pthread_key_create(&thread_current, NULL);
+}
+
+struct event_loop *event_master_create(const char *name)
+{
+ struct event_loop *rv;
+ struct rlimit limit;
+
+ pthread_once(&init_once, &initializer);
+
+ rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct event_loop));
+
+ /* Initialize master mutex */
+ pthread_mutex_init(&rv->mtx, NULL);
+ pthread_cond_init(&rv->cancel_cond, NULL);
+
+ /* Set name */
+ name = name ? name : "default";
+ rv->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
+
+ /* Initialize I/O task data structures */
+
+ /* Use configured limit if present, ulimit otherwise. */
+ rv->fd_limit = frr_get_fd_limit();
+ if (rv->fd_limit == 0) {
+ getrlimit(RLIMIT_NOFILE, &limit);
+ rv->fd_limit = (int)limit.rlim_cur;
+ }
+
+ rv->read = XCALLOC(MTYPE_EVENT_POLL,
+ sizeof(struct event *) * rv->fd_limit);
+
+ rv->write = XCALLOC(MTYPE_EVENT_POLL,
+ sizeof(struct event *) * rv->fd_limit);
+
+ char tmhashname[strlen(name) + 32];
+
+ snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
+ name);
+ rv->cpu_record = hash_create_size(
+ 8, (unsigned int (*)(const void *))cpu_record_hash_key,
+ (bool (*)(const void *, const void *))cpu_record_hash_cmp,
+ tmhashname);
+
+ event_list_init(&rv->event);
+ event_list_init(&rv->ready);
+ event_list_init(&rv->unuse);
+ event_timer_list_init(&rv->timer);
+
+ /* Initialize event_fetch() settings */
+ rv->spin = true;
+ rv->handle_signals = true;
+
+ /* Set pthread owner, should be updated by actual owner */
+ rv->owner = pthread_self();
+ rv->cancel_req = list_new();
+ rv->cancel_req->del = cancelreq_del;
+ rv->canceled = true;
+
+ /* Initialize pipe poker */
+ pipe(rv->io_pipe);
+ set_nonblocking(rv->io_pipe[0]);
+ set_nonblocking(rv->io_pipe[1]);
+
+ /* Initialize data structures for poll() */
+ rv->handler.pfdsize = rv->fd_limit;
+ rv->handler.pfdcount = 0;
+ rv->handler.pfds = XCALLOC(MTYPE_EVENT_MASTER,
+ sizeof(struct pollfd) * rv->handler.pfdsize);
+ rv->handler.copy = XCALLOC(MTYPE_EVENT_MASTER,
+ sizeof(struct pollfd) * rv->handler.pfdsize);
+
+ /* add to list of threadmasters */
+ frr_with_mutex (&masters_mtx) {
+ if (!masters)
+ masters = list_new();
+
+ listnode_add(masters, rv);
+ }
+
+ return rv;
+}
+
+/* Replace the event loop's name, freeing the old one; serialized on the
+ * loop's own mutex so it is safe against concurrent readers of ->name.
+ */
+void event_master_set_name(struct event_loop *master, const char *name)
+{
+	frr_with_mutex (&master->mtx) {
+		XFREE(MTYPE_EVENT_MASTER, master->name);
+		master->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
+	}
+}
+
+/* Maximum number of recycled struct event objects kept on the unuse list;
+ * anything beyond this is freed outright.
+ */
+#define EVENT_UNUSED_DEPTH 10
+
+/* Move thread to unuse list for later reuse (or free it if the unuse
+ * list is already at EVENT_UNUSED_DEPTH).
+ */
+static void thread_add_unuse(struct event_loop *m, struct event *thread)
+{
+	/* Save the mutex: it was initialized once at allocation time and
+	 * must survive the memset below.
+	 */
+	pthread_mutex_t mtxc = thread->mtx;
+
+	assert(m != NULL && thread != NULL);
+
+	thread->hist->total_active--;
+	memset(thread, 0, sizeof(struct event));
+	thread->type = EVENT_UNUSED;
+
+	/* Restore the thread mutex context. */
+	thread->mtx = mtxc;
+
+	if (event_list_count(&m->unuse) < EVENT_UNUSED_DEPTH) {
+		event_list_add_tail(&m->unuse, thread);
+		return;
+	}
+
+	thread_free(m, thread);
+}
+
+/* Free every event on the given list (used during loop teardown). */
+static void thread_list_free(struct event_loop *m, struct event_list_head *list)
+{
+	struct event *t;
+
+	while ((t = event_list_pop(list)))
+		thread_free(m, t);
+}
+
+/* Free every event still referenced by a read/write fd array slot, then
+ * free the array itself (sized m->fd_limit).
+ */
+static void thread_array_free(struct event_loop *m, struct event **thread_array)
+{
+	struct event *t;
+	int index;
+
+	for (index = 0; index < m->fd_limit; ++index) {
+		t = thread_array[index];
+		if (t) {
+			thread_array[index] = NULL;
+			thread_free(m, t);
+		}
+	}
+	XFREE(MTYPE_EVENT_POLL, thread_array);
+}
+
+/*
+ * event_master_free_unused
+ *
+ * As threads are finished with they are put on the
+ * unuse list for later reuse.
+ * If we are shutting down, Free up unused threads
+ * So we can see if we forget to shut anything off
+ *
+ * Takes the loop mutex, so it is safe to call from the owning pthread
+ * while the loop is otherwise idle.
+ */
+void event_master_free_unused(struct event_loop *m)
+{
+	frr_with_mutex (&m->mtx) {
+		struct event *t;
+
+		while ((t = event_list_pop(&m->unuse)))
+			thread_free(m, t);
+	}
+}
+
+/* Stop thread scheduler: tear down an event loop entirely — remove it from
+ * the global masters list, free all scheduled/ready/unused tasks, destroy
+ * synchronization primitives, close the poker pipe, and free all poll()
+ * bookkeeping. Must not be called while the loop is still running.
+ */
+void event_master_free(struct event_loop *m)
+{
+	struct event *t;
+
+	frr_with_mutex (&masters_mtx) {
+		listnode_delete(masters, m);
+		/* Last loop gone: drop the global list as well. */
+		if (masters->count == 0)
+			list_delete(&masters);
+	}
+
+	thread_array_free(m, m->read);
+	thread_array_free(m, m->write);
+	while ((t = event_timer_list_pop(&m->timer)))
+		thread_free(m, t);
+	thread_list_free(m, &m->event);
+	thread_list_free(m, &m->ready);
+	thread_list_free(m, &m->unuse);
+	pthread_mutex_destroy(&m->mtx);
+	pthread_cond_destroy(&m->cancel_cond);
+	close(m->io_pipe[0]);
+	close(m->io_pipe[1]);
+	list_delete(&m->cancel_req);
+	m->cancel_req = NULL;
+
+	hash_clean_and_free(&m->cpu_record, cpu_record_hash_free);
+
+	XFREE(MTYPE_EVENT_MASTER, m->name);
+	XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
+	XFREE(MTYPE_EVENT_MASTER, m->handler.copy);
+	XFREE(MTYPE_EVENT_MASTER, m);
+}
+
+/* Return remain time in milliseconds. Returns 0 if the timer is not
+ * scheduled or has already expired (negative remainder is clamped).
+ */
+unsigned long event_timer_remain_msec(struct event *thread)
+{
+	int64_t remain;
+
+	if (!event_is_scheduled(thread))
+		return 0;
+
+	frr_with_mutex (&thread->mtx) {
+		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
+	}
+
+	return remain < 0 ? 0 : remain;
+}
+
+/* Return remain time in seconds (truncated from the msec remainder). */
+unsigned long event_timer_remain_second(struct event *thread)
+{
+	return event_timer_remain_msec(thread) / 1000LL;
+}
+
+/* Return the remaining time of a timer event as a struct timeval. */
+struct timeval event_timer_remain(struct event *thread)
+{
+	struct timeval remain;
+
+	frr_with_mutex (&thread->mtx) {
+		monotime_until(&thread->u.sands, &remain);
+	}
+	return remain;
+}
+
+/* Format a second count as "HH:MM:SS" into buf (buf_size must be >= 8,
+ * really >= 9 including the NUL). Returns 0 when exactly 8 characters
+ * were written, non-zero otherwise (e.g. hours wider than two digits).
+ */
+static int time_hhmmss(char *buf, int buf_size, long sec)
+{
+	long hh;
+	long mm;
+	int wr;
+
+	assert(buf_size >= 8);
+
+	hh = sec / 3600;
+	sec %= 3600;
+	mm = sec / 60;
+	sec %= 60;
+
+	wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);
+
+	return wr != 8;
+}
+
+/* Render a timer's remaining time as "HH:MM:SS" into buf; writes
+ * "--:--:--" when no timer is scheduled. Returns buf for convenience.
+ */
+char *event_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
+{
+	if (t_timer)
+		time_hhmmss(buf, buf_size, event_timer_remain_second(t_timer));
+	else
+		snprintf(buf, buf_size, "--:--:--");
+
+	return buf;
+}
+
+/* Get new thread: recycle an event object from the unuse list, or
+ * allocate a fresh one. Fills in type/func/arg/xref and attaches the
+ * per-(func,funcname) CPU-statistics record. Caller must hold m->mtx.
+ */
+static struct event *thread_get(struct event_loop *m, uint8_t type,
+				void (*func)(struct event *), void *arg,
+				const struct xref_eventsched *xref)
+{
+	struct event *thread = event_list_pop(&m->unuse);
+	struct cpu_event_history tmp;
+
+	if (!thread) {
+		thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
+		/* mutex only needs to be initialized at struct creation. */
+		pthread_mutex_init(&thread->mtx, NULL);
+		m->alloc++;
+	}
+
+	thread->type = type;
+	thread->add_type = type;
+	thread->master = m;
+	thread->arg = arg;
+	thread->yield = EVENT_YIELD_TIME_SLOT; /* default */
+	thread->ref = NULL;
+	thread->ignore_timer_late = false;
+
+	/*
+	 * So if the passed in funcname is not what we have
+	 * stored that means the thread->hist needs to be
+	 * updated.  We keep the last one around in unused
+	 * under the assumption that we are probably
+	 * going to immediately allocate the same
+	 * type of thread.
+	 * This hopefully saves us some serious
+	 * hash_get lookups.
+	 */
+	if ((thread->xref && thread->xref->funcname != xref->funcname)
+	    || thread->func != func) {
+		tmp.func = func;
+		tmp.funcname = xref->funcname;
+		thread->hist =
+			hash_get(m->cpu_record, &tmp,
+				 (void *(*)(void *))cpu_record_hash_alloc);
+	}
+	thread->hist->total_active++;
+	thread->func = func;
+	thread->xref = xref;
+
+	return thread;
+}
+
+/* Destroy an event object: decrement the loop's allocation counter,
+ * destroy its mutex and free it. Caller must already have removed it
+ * from any list/array.
+ */
+static void thread_free(struct event_loop *master, struct event *thread)
+{
+	/* Update statistics. */
+	assert(master->alloc > 0);
+	master->alloc--;
+
+	/* Free allocated resources. */
+	pthread_mutex_destroy(&thread->mtx);
+	XFREE(MTYPE_THREAD, thread);
+}
+
+/* Run one poll()/ppoll() pass over the copied pollfd array, with the
+ * self-pipe "poker" appended so other pthreads can interrupt the wait.
+ * timer_wait bounds the sleep (NULL = block indefinitely unless
+ * m->selectpoll_timeout overrides). Returns poll()'s result; sets
+ * *eintr_p when interrupted by a signal. RCU is released around the
+ * blocking call.
+ */
+static int fd_poll(struct event_loop *m, const struct timeval *timer_wait,
+		   bool *eintr_p)
+{
+	sigset_t origsigs;
+	unsigned char trash[64];
+	nfds_t count = m->handler.copycount;
+
+	/*
+	 * If timer_wait is null here, that means poll() should block
+	 * indefinitely, unless the event_master has overridden it by setting
+	 * ->selectpoll_timeout.
+	 *
+	 * If the value is positive, it specifies the maximum number of
+	 * milliseconds to wait. If the timeout is -1, it specifies that
+	 * we should never wait and always return immediately even if no
+	 * event is detected. If the value is zero, the behavior is default.
+	 */
+	int timeout = -1;
+
+	/* number of file descriptors with events */
+	int num;
+
+	if (timer_wait != NULL && m->selectpoll_timeout == 0) {
+		/* use the default value */
+		timeout = (timer_wait->tv_sec * 1000)
+			  + (timer_wait->tv_usec / 1000);
+	} else if (m->selectpoll_timeout > 0) {
+		/* use the user's timeout */
+		timeout = m->selectpoll_timeout;
+	} else if (m->selectpoll_timeout < 0) {
+		/* effect a poll (return immediately) */
+		timeout = 0;
+	}
+
+	zlog_tls_buffer_flush();
+	rcu_read_unlock();
+	rcu_assert_read_unlocked();
+
+	/* add poll pipe poker */
+	assert(count + 1 < m->handler.pfdsize);
+	m->handler.copy[count].fd = m->io_pipe[0];
+	m->handler.copy[count].events = POLLIN;
+	m->handler.copy[count].revents = 0x00;
+
+	/* We need to deal with a signal-handling race here: we
+	 * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
+	 * that may arrive just before we enter poll(). We will block the
+	 * key signals, then check whether any have arrived - if so, we return
+	 * before calling poll(). If not, we'll re-enable the signals
+	 * in the ppoll() call.
+	 */
+
+	sigemptyset(&origsigs);
+	if (m->handle_signals) {
+		/* Main pthread that handles the app signals */
+		if (frr_sigevent_check(&origsigs)) {
+			/* Signal to process - restore signal mask and return */
+			pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
+			num = -1;
+			*eintr_p = true;
+			goto done;
+		}
+	} else {
+		/* Don't make any changes for the non-main pthreads */
+		pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
+	}
+
+#if defined(HAVE_PPOLL)
+	struct timespec ts, *tsp;
+
+	if (timeout >= 0) {
+		ts.tv_sec = timeout / 1000;
+		ts.tv_nsec = (timeout % 1000) * 1000000;
+		tsp = &ts;
+	} else
+		tsp = NULL;
+
+	num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
+	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
+#else
+	/* Not ideal - there is a race after we restore the signal mask */
+	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
+	num = poll(m->handler.copy, count + 1, timeout);
+#endif
+
+done:
+
+	if (num < 0 && errno == EINTR)
+		*eintr_p = true;
+
+	/* Drain the pipe poker if it fired, and exclude it from the count
+	 * of ready fds returned to the caller.
+	 */
+	if (num > 0 && m->handler.copy[count].revents != 0 && num--)
+		while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
+			;
+
+	rcu_read_lock();
+
+	return num;
+}
+
+/* Add a new read or write task for fd; the direction comes from
+ * xref->event_type (EVENT_READ / EVENT_WRITE). If t_ptr is non-NULL and
+ * already points at a scheduled task, this is a no-op (no reschedule).
+ */
+void _event_add_read_write(const struct xref_eventsched *xref,
+			   struct event_loop *m, void (*func)(struct event *),
+			   void *arg, int fd, struct event **t_ptr)
+{
+	int dir = xref->event_type;
+	struct event *thread = NULL;
+	struct event **thread_array;
+
+	if (dir == EVENT_READ)
+		frrtrace(9, frr_libfrr, schedule_read, m,
+			 xref->funcname, xref->xref.file, xref->xref.line,
+			 t_ptr, fd, 0, arg, 0);
+	else
+		frrtrace(9, frr_libfrr, schedule_write, m,
+			 xref->funcname, xref->xref.file, xref->xref.line,
+			 t_ptr, fd, 0, arg, 0);
+
+	assert(fd >= 0);
+	if (fd >= m->fd_limit)
+		assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
+
+	frr_with_mutex (&m->mtx) {
+		/* Thread is already scheduled; don't reschedule */
+		if (t_ptr && *t_ptr)
+			break;
+
+		/* default to a new pollfd */
+		nfds_t queuepos = m->handler.pfdcount;
+
+		if (dir == EVENT_READ)
+			thread_array = m->read;
+		else
+			thread_array = m->write;
+
+		/*
+		 * if we already have a pollfd for our file descriptor, find and
+		 * use it
+		 */
+		for (nfds_t i = 0; i < m->handler.pfdcount; i++)
+			if (m->handler.pfds[i].fd == fd) {
+				queuepos = i;
+
+#ifdef DEV_BUILD
+				/*
+				 * What happens if we have a thread already
+				 * created for this event?
+				 */
+				if (thread_array[fd])
+					assert(!"Thread already scheduled for file descriptor");
+#endif
+				break;
+			}
+
+		/* make sure we have room for this fd + pipe poker fd */
+		assert(queuepos + 1 < m->handler.pfdsize);
+
+		thread = thread_get(m, dir, func, arg, xref);
+
+		m->handler.pfds[queuepos].fd = fd;
+		m->handler.pfds[queuepos].events |=
+			(dir == EVENT_READ ? POLLIN : POLLOUT);
+
+		/* New slot consumed; grow the active pollfd count. */
+		if (queuepos == m->handler.pfdcount)
+			m->handler.pfdcount++;
+
+		if (thread) {
+			frr_with_mutex (&thread->mtx) {
+				thread->u.fd = fd;
+				thread_array[thread->u.fd] = thread;
+			}
+
+			if (t_ptr) {
+				*t_ptr = thread;
+				thread->ref = t_ptr;
+			}
+		}
+
+		/* Poke the loop so a blocked poll() picks up the new fd. */
+		AWAKEN(m);
+	}
+}
+
+/* Common worker for all timer-add flavors: schedule func(arg) to run
+ * time_relative from now. No-op if *t_ptr is already scheduled. Logs a
+ * warning for expirations more than a year out (likely a caller bug).
+ */
+static void _event_add_timer_timeval(const struct xref_eventsched *xref,
+				     struct event_loop *m,
+				     void (*func)(struct event *), void *arg,
+				     struct timeval *time_relative,
+				     struct event **t_ptr)
+{
+	struct event *thread;
+	struct timeval t;
+
+	assert(m != NULL);
+
+	assert(time_relative);
+
+	frrtrace(9, frr_libfrr, schedule_timer, m,
+		 xref->funcname, xref->xref.file, xref->xref.line,
+		 t_ptr, 0, 0, arg, (long)time_relative->tv_sec);
+
+	/* Compute expiration/deadline time. */
+	monotime(&t);
+	timeradd(&t, time_relative, &t);
+
+	frr_with_mutex (&m->mtx) {
+		if (t_ptr && *t_ptr)
+			/* thread is already scheduled; don't reschedule */
+			return;
+
+		thread = thread_get(m, EVENT_TIMER, func, arg, xref);
+
+		frr_with_mutex (&thread->mtx) {
+			thread->u.sands = t;
+			event_timer_list_add(&m->timer, thread);
+			if (t_ptr) {
+				*t_ptr = thread;
+				thread->ref = t_ptr;
+			}
+		}
+
+		/* The timer list is sorted - if this new timer
+		 * might change the time we'll wait for, give the pthread
+		 * a chance to re-compute.
+		 */
+		if (event_timer_list_first(&m->timer) == thread)
+			AWAKEN(m);
+	}
+#define ONEYEAR2SEC (60 * 60 * 24 * 365)
+	if (time_relative->tv_sec > ONEYEAR2SEC)
+		flog_err(
+			EC_LIB_TIMER_TOO_LONG,
+			"Timer: %pTHD is created with an expiration that is greater than 1 year",
+			thread);
+}
+
+
+/* Add timer event thread; 'timer' is a relative delay in whole seconds. */
+void _event_add_timer(const struct xref_eventsched *xref, struct event_loop *m,
+		      void (*func)(struct event *), void *arg, long timer,
+		      struct event **t_ptr)
+{
+	struct timeval trel;
+
+	assert(m != NULL);
+
+	trel.tv_sec = timer;
+	trel.tv_usec = 0;
+
+	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
+}
+
+/* Add timer event thread with "millisecond" resolution; 'timer' is a
+ * relative delay in milliseconds, split into tv_sec/tv_usec below.
+ */
+void _event_add_timer_msec(const struct xref_eventsched *xref,
+			   struct event_loop *m, void (*func)(struct event *),
+			   void *arg, long timer, struct event **t_ptr)
+{
+	struct timeval trel;
+
+	assert(m != NULL);
+
+	trel.tv_sec = timer / 1000;
+	trel.tv_usec = 1000 * (timer % 1000);
+
+	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
+}
+
+/* Add timer event thread with "timeval" resolution (thin wrapper over
+ * the common timeval-based scheduler).
+ */
+void _event_add_timer_tv(const struct xref_eventsched *xref,
+			 struct event_loop *m, void (*func)(struct event *),
+			 void *arg, struct timeval *tv, struct event **t_ptr)
+{
+	_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
+}
+
+/* Add simple event thread: schedule func(arg) to run on the next loop
+ * iteration; 'val' is stashed in thread->u.val for the callback. No-op
+ * if *t_ptr is already scheduled.
+ */
+void _event_add_event(const struct xref_eventsched *xref, struct event_loop *m,
+		      void (*func)(struct event *), void *arg, int val,
+		      struct event **t_ptr)
+{
+	struct event *thread = NULL;
+
+	frrtrace(9, frr_libfrr, schedule_event, m,
+		 xref->funcname, xref->xref.file, xref->xref.line,
+		 t_ptr, 0, val, arg, 0);
+
+	assert(m != NULL);
+
+	frr_with_mutex (&m->mtx) {
+		if (t_ptr && *t_ptr)
+			/* thread is already scheduled; don't reschedule */
+			break;
+
+		thread = thread_get(m, EVENT_EVENT, func, arg, xref);
+		frr_with_mutex (&thread->mtx) {
+			thread->u.val = val;
+			event_list_add_tail(&m->event, thread);
+		}
+
+		if (t_ptr) {
+			*t_ptr = thread;
+			thread->ref = t_ptr;
+		}
+
+		/* Wake a potentially-blocked poll() so the event runs soon. */
+		AWAKEN(m);
+	}
+}
+
+/* Thread cancellation ------------------------------------------------------ */
+
+/**
+ * NOT's out the .events field of pollfd corresponding to the given file
+ * descriptor. The event to be NOT'd is passed in the 'state' parameter.
+ *
+ * This needs to happen for both copies of pollfd's. See 'event_fetch'
+ * implementation for details.
+ *
+ * @param master
+ * @param fd
+ * @param state the event to cancel. One or more (OR'd together) of the
+ * following:
+ *   - POLLIN
+ *   - POLLOUT
+ * @param idx_hint index of fd's pollfd in the array if the caller already
+ * knows it, or -1 to search for it here.
+ */
+static void event_cancel_rw(struct event_loop *master, int fd, short state,
+			    int idx_hint)
+{
+	bool found = false;
+
+	/* find the index of corresponding pollfd */
+	nfds_t i;
+
+	/* Cancel POLLHUP too just in case some bozo set it */
+	state |= POLLHUP;
+
+	/* Some callers know the index of the pfd already */
+	if (idx_hint >= 0) {
+		i = idx_hint;
+		found = true;
+	} else {
+		/* Have to look for the fd in the pfd array */
+		for (i = 0; i < master->handler.pfdcount; i++)
+			if (master->handler.pfds[i].fd == fd) {
+				found = true;
+				break;
+			}
+	}
+
+	if (!found) {
+		zlog_debug(
+			"[!] Received cancellation request for nonexistent rw job");
+		zlog_debug("[!] threadmaster: %s | fd: %d",
+			   master->name ? master->name : "", fd);
+		return;
+	}
+
+	/* NOT out event. */
+	master->handler.pfds[i].events &= ~(state);
+
+	/* If all events are canceled, delete / resize the pollfd array. */
+	if (master->handler.pfds[i].events == 0) {
+		memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
+			(master->handler.pfdcount - i - 1)
+				* sizeof(struct pollfd));
+		master->handler.pfdcount--;
+		master->handler.pfds[master->handler.pfdcount].fd = 0;
+		master->handler.pfds[master->handler.pfdcount].events = 0;
+	}
+
+	/*
+	 * If we have the same pollfd in the copy, perform the same operations,
+	 * otherwise return.
+	 */
+	if (i >= master->handler.copycount)
+		return;
+
+	master->handler.copy[i].events &= ~(state);
+
+	if (master->handler.copy[i].events == 0) {
+		memmove(master->handler.copy + i, master->handler.copy + i + 1,
+			(master->handler.copycount - i - 1)
+				* sizeof(struct pollfd));
+		master->handler.copycount--;
+		master->handler.copy[master->handler.copycount].fd = 0;
+		master->handler.copy[master->handler.copycount].events = 0;
+	}
+}
+
+/*
+ * Process task cancellation given a task argument: iterate through the
+ * various lists of tasks, looking for any that match the argument.
+ * Matching tasks are unlinked, their caller back-references cleared,
+ * and the task objects recycled onto the unuse list.
+ */
+static void cancel_arg_helper(struct event_loop *master,
+			      const struct cancel_req *cr)
+{
+	struct event *t;
+	nfds_t i;
+	int fd;
+	struct pollfd *pfd;
+
+	/* We're only processing arg-based cancellations here. */
+	if (cr->eventobj == NULL)
+		return;
+
+	/* First process the ready lists. */
+	frr_each_safe (event_list, &master->event, t) {
+		if (t->arg != cr->eventobj)
+			continue;
+		event_list_del(&master->event, t);
+		if (t->ref)
+			*t->ref = NULL;
+		thread_add_unuse(master, t);
+	}
+
+	frr_each_safe (event_list, &master->ready, t) {
+		if (t->arg != cr->eventobj)
+			continue;
+		event_list_del(&master->ready, t);
+		if (t->ref)
+			*t->ref = NULL;
+		thread_add_unuse(master, t);
+	}
+
+	/* If requested, stop here and ignore io and timers */
+	if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
+		return;
+
+	/* Check the io tasks */
+	for (i = 0; i < master->handler.pfdcount;) {
+		pfd = master->handler.pfds + i;
+
+		/* NOTE(review): a pollfd with both POLLIN and POLLOUT set
+		 * is only checked against the read task here — confirm a
+		 * matching write task on the same fd cannot be missed.
+		 */
+		if (pfd->events & POLLIN)
+			t = master->read[pfd->fd];
+		else
+			t = master->write[pfd->fd];
+
+		if (t && t->arg == cr->eventobj) {
+			fd = pfd->fd;
+
+			/* Found a match to cancel: clean up fd arrays */
+			event_cancel_rw(master, pfd->fd, pfd->events, i);
+
+			/* Clean up thread arrays */
+			master->read[fd] = NULL;
+			master->write[fd] = NULL;
+
+			/* Clear caller's ref */
+			if (t->ref)
+				*t->ref = NULL;
+
+			thread_add_unuse(master, t);
+
+			/* Don't increment 'i' since the cancellation will have
+			 * removed the entry from the pfd array
+			 */
+		} else
+			i++;
+	}
+
+	/* Check the timer tasks */
+	t = event_timer_list_first(&master->timer);
+	while (t) {
+		struct event *t_next;
+
+		t_next = event_timer_list_next(&master->timer, t);
+
+		if (t->arg == cr->eventobj) {
+			event_timer_list_del(&master->timer, t);
+			if (t->ref)
+				*t->ref = NULL;
+			thread_add_unuse(master, t);
+		}
+
+		t = t_next;
+	}
+}
+
+/**
+ * Process cancellation requests.
+ *
+ * This may only be run from the pthread which owns the event_master.
+ *
+ * Walks master->cancel_req, removing each referenced task from whatever
+ * queue/array it lives on, then empties the request list and signals
+ * any pthread blocked in event_cancel_async().
+ *
+ * @param master the thread master to process
+ * @REQUIRE master->mtx
+ */
+static void do_event_cancel(struct event_loop *master)
+{
+	struct event_list_head *list = NULL;
+	struct event **thread_array = NULL;
+	struct event *thread;
+	struct cancel_req *cr;
+	struct listnode *ln;
+
+	for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
+		/*
+		 * If this is an event object cancellation, search
+		 * through task lists deleting any tasks which have the
+		 * specified argument - use this handy helper function.
+		 */
+		if (cr->eventobj) {
+			cancel_arg_helper(master, cr);
+			continue;
+		}
+
+		/*
+		 * The pointer varies depending on whether the cancellation
+		 * request was made asynchronously or not. If it was, we
+		 * need to check whether the thread even exists anymore
+		 * before cancelling it.
+		 */
+		thread = (cr->thread) ? cr->thread : *cr->threadref;
+
+		if (!thread)
+			continue;
+
+		list = NULL;
+		thread_array = NULL;
+
+		/* Determine the appropriate queue to cancel the thread from */
+		switch (thread->type) {
+		case EVENT_READ:
+			event_cancel_rw(master, thread->u.fd, POLLIN, -1);
+			thread_array = master->read;
+			break;
+		case EVENT_WRITE:
+			event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
+			thread_array = master->write;
+			break;
+		case EVENT_TIMER:
+			event_timer_list_del(&master->timer, thread);
+			break;
+		case EVENT_EVENT:
+			list = &master->event;
+			break;
+		case EVENT_READY:
+			list = &master->ready;
+			break;
+		case EVENT_UNUSED:
+		case EVENT_EXECUTE:
+			continue;
+			/* NOTE(review): break below is unreachable after
+			 * continue; kept as-is.
+			 */
+			break;
+		}
+
+		if (list)
+			event_list_del(list, thread);
+		else if (thread_array)
+			thread_array[thread->u.fd] = NULL;
+
+		if (thread->ref)
+			*thread->ref = NULL;
+
+		thread_add_unuse(thread->master, thread);
+	}
+
+	/* Delete and free all cancellation requests */
+	if (master->cancel_req)
+		list_delete_all_node(master->cancel_req);
+
+	/* Wake up any threads which may be blocked in event_cancel_async() */
+	master->canceled = true;
+	pthread_cond_broadcast(&master->cancel_cond);
+}
+
+/*
+ * Helper function used for multiple flavors of arg-based cancellation.
+ * Synchronous: must run on the loop's owning pthread; queues a single
+ * cancel_req and processes it immediately under the loop mutex.
+ */
+static void cancel_event_helper(struct event_loop *m, void *arg, int flags)
+{
+	struct cancel_req *cr;
+
+	assert(m->owner == pthread_self());
+
+	/* Only worth anything if caller supplies an arg. */
+	if (arg == NULL)
+		return;
+
+	cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
+
+	cr->flags = flags;
+
+	frr_with_mutex (&m->mtx) {
+		cr->eventobj = arg;
+		listnode_add(m->cancel_req, cr);
+		do_event_cancel(m);
+	}
+}
+
+/**
+ * Cancel any events which have the specified argument.
+ *
+ * MT-Unsafe: must be called from the loop's owning pthread.
+ *
+ * @param master the event_master to cancel from
+ * @param arg the argument passed when creating the event
+ */
+void event_cancel_event(struct event_loop *master, void *arg)
+{
+	cancel_event_helper(master, arg, 0);
+}
+
+/*
+ * Cancel ready tasks with an arg matching 'arg'; scheduled I/O and timer
+ * tasks are left untouched (EVENT_CANCEL_FLAG_READY).
+ *
+ * MT-Unsafe
+ *
+ * @param m the event_master to cancel from
+ * @param arg the argument passed when creating the event
+ */
+void event_cancel_event_ready(struct event_loop *m, void *arg)
+{
+
+	/* Only cancel ready/event tasks */
+	cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
+}
+
+/**
+ * Cancel a specific task. On return, *thread is set to NULL so the
+ * caller's back-reference cannot dangle.
+ *
+ * MT-Unsafe: must be called from the loop's owning pthread.
+ *
+ * @param thread task to cancel
+ */
+void event_cancel(struct event **thread)
+{
+	struct event_loop *master;
+
+	if (thread == NULL || *thread == NULL)
+		return;
+
+	master = (*thread)->master;
+
+	frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
+		 (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
+		 (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
+		 (*thread)->u.sands.tv_sec);
+
+	assert(master->owner == pthread_self());
+
+	frr_with_mutex (&master->mtx) {
+		struct cancel_req *cr =
+			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
+		cr->thread = *thread;
+		listnode_add(master->cancel_req, cr);
+		do_event_cancel(master);
+	}
+
+	*thread = NULL;
+}
+
+/**
+ * Asynchronous cancellation.
+ *
+ * Called with either a struct event ** or void * to an event argument,
+ * this function posts the correct cancellation request and blocks until it is
+ * serviced.
+ *
+ * If the thread is currently running, execution blocks until it completes.
+ *
+ * The last two parameters are mutually exclusive, i.e. if you pass one the
+ * other must be NULL.
+ *
+ * When the cancellation procedure executes on the target event_master, the
+ * thread * provided is checked for nullity. If it is null, the thread is
+ * assumed to no longer exist and the cancellation request is a no-op. Thus
+ * users of this API must pass a back-reference when scheduling the original
+ * task.
+ *
+ * MT-Safe
+ *
+ * @param master the thread master with the relevant event / task
+ * @param thread pointer to thread to cancel
+ * @param eventobj the event
+ */
+void event_cancel_async(struct event_loop *master, struct event **thread,
+			void *eventobj)
+{
+	assert(!(thread && eventobj) && (thread || eventobj));
+
+	if (thread && *thread)
+		frrtrace(9, frr_libfrr, event_cancel_async, master,
+			 (*thread)->xref->funcname, (*thread)->xref->xref.file,
+			 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
+			 (*thread)->u.val, (*thread)->arg,
+			 (*thread)->u.sands.tv_sec);
+	else
+		frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
+			 0, NULL, 0, 0, eventobj, 0);
+
+	/* Asynchronous path only: the owning pthread uses event_cancel(). */
+	assert(master->owner != pthread_self());
+
+	frr_with_mutex (&master->mtx) {
+		master->canceled = false;
+
+		if (thread) {
+			struct cancel_req *cr =
+				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
+			cr->threadref = thread;
+			listnode_add(master->cancel_req, cr);
+		} else if (eventobj) {
+			struct cancel_req *cr =
+				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
+			cr->eventobj = eventobj;
+			listnode_add(master->cancel_req, cr);
+		}
+		AWAKEN(master);
+
+		/* Block until the owner services the request. */
+		while (!master->canceled)
+			pthread_cond_wait(&master->cancel_cond, &master->mtx);
+	}
+
+	if (thread)
+		*thread = NULL;
+}
+/* ------------------------------------------------------------------------- */
+
+/* Compute how long poll() may sleep: time until the earliest timer pops,
+ * written into *timer_val. Returns NULL when no timers are pending
+ * (meaning "block indefinitely").
+ */
+static struct timeval *thread_timer_wait(struct event_timer_list_head *timers,
+					 struct timeval *timer_val)
+{
+	if (!event_timer_list_count(timers))
+		return NULL;
+
+	struct event *next_timer = event_timer_list_first(timers);
+
+	monotime_until(&next_timer->u.sands, timer_val);
+	return timer_val;
+}
+
+/* Hand a task to the caller: copy it into the caller-owned 'fetch'
+ * storage and recycle the original object onto the unuse list.
+ */
+static struct event *thread_run(struct event_loop *m, struct event *thread,
+				struct event *fetch)
+{
+	*fetch = *thread;
+	thread_add_unuse(m, thread);
+	return fetch;
+}
+
+/* Move one I/O task to the ready queue after poll() reported activity;
+ * returns 1 if a task was queued, 0 if no task was registered for the fd.
+ */
+static int thread_process_io_helper(struct event_loop *m, struct event *thread,
+				    short state, short actual_state, int pos)
+{
+	struct event **thread_array;
+
+	/*
+	 * poll() clears the .events field, but the pollfd array we
+	 * pass to poll() is a copy of the one used to schedule threads.
+	 * We need to synchronize state between the two here by applying
+	 * the same changes poll() made on the copy of the "real" pollfd
+	 * array.
+	 *
+	 * This cleans up a possible infinite loop where we refuse
+	 * to respond to a poll event but poll is insistent that
+	 * we should.
+	 */
+	m->handler.pfds[pos].events &= ~(state);
+
+	if (!thread) {
+		/* A bare POLLHUP with no reader is expected; anything else
+		 * without a handler is an error worth logging.
+		 */
+		if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
+			flog_err(EC_LIB_NO_THREAD,
+				 "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
+				 m->handler.pfds[pos].fd, actual_state);
+		return 0;
+	}
+
+	if (thread->type == EVENT_READ)
+		thread_array = m->read;
+	else
+		thread_array = m->write;
+
+	thread_array[thread->u.fd] = NULL;
+	event_list_add_tail(&m->ready, thread);
+	thread->type = EVENT_READY;
+
+	return 1;
+}
+
+/**
+ * Process I/O events.
+ *
+ * Walks through file descriptor array looking for those pollfds whose .revents
+ * field has something interesting. Deletes any invalid file descriptors.
+ *
+ * Iteration stops early once 'num' ready descriptors have been handled.
+ *
+ * @param m the thread master
+ * @param num the number of active file descriptors (return value of poll())
+ */
+static void thread_process_io(struct event_loop *m, unsigned int num)
+{
+	unsigned int ready = 0;
+	struct pollfd *pfds = m->handler.copy;
+
+	for (nfds_t i = 0; i < m->handler.copycount && ready < num; ++i) {
+		/* no event for current fd? immediately continue */
+		if (pfds[i].revents == 0)
+			continue;
+
+		ready++;
+
+		/*
+		 * Unless someone has called event_cancel from another
+		 * pthread, the only thing that could have changed in
+		 * m->handler.pfds while we were asleep is the .events
+		 * field in a given pollfd. Barring event_cancel() that
+		 * value should be a superset of the values we have in our
+		 * copy, so there's no need to update it. Similarily,
+		 * barring deletion, the fd should still be a valid index
+		 * into the master's pfds.
+		 *
+		 * We are including POLLERR here to do a READ event
+		 * this is because the read should fail and the
+		 * read function should handle it appropriately
+		 */
+		if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
+			thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN,
+						 pfds[i].revents, i);
+		}
+		if (pfds[i].revents & POLLOUT)
+			thread_process_io_helper(m, m->write[pfds[i].fd],
+						 POLLOUT, pfds[i].revents, i);
+
+		/*
+		 * if one of our file descriptors is garbage, remove the same
+		 * from both pfds + update sizes and index
+		 */
+		if (pfds[i].revents & POLLNVAL) {
+			memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
+				(m->handler.pfdcount - i - 1)
+					* sizeof(struct pollfd));
+			m->handler.pfdcount--;
+			m->handler.pfds[m->handler.pfdcount].fd = 0;
+			m->handler.pfds[m->handler.pfdcount].events = 0;
+
+			memmove(pfds + i, pfds + i + 1,
+				(m->handler.copycount - i - 1)
+					* sizeof(struct pollfd));
+			m->handler.copycount--;
+			m->handler.copy[m->handler.copycount].fd = 0;
+			m->handler.copy[m->handler.copycount].events = 0;
+
+			i--;
+		}
+	}
+}
+
+/* Add all timers that have popped to the ready list; returns how many
+ * were queued. Warns (once per pass) about timers serviced more than 4s
+ * late, which indicates event-loop starvation.
+ */
+static unsigned int thread_process_timers(struct event_loop *m,
+					  struct timeval *timenow)
+{
+	struct timeval prev = *timenow;
+	bool displayed = false;
+	struct event *thread;
+	unsigned int ready = 0;
+
+	while ((thread = event_timer_list_first(&m->timer))) {
+		/* Timer list is sorted: first unexpired timer ends the scan. */
+		if (timercmp(timenow, &thread->u.sands, <))
+			break;
+		prev = thread->u.sands;
+		prev.tv_sec += 4;
+		/*
+		 * If the timer would have popped 4 seconds in the
+		 * past then we are in a situation where we are
+		 * really getting behind on handling of events.
+		 * Let's log it and do the right thing with it.
+		 */
+		if (timercmp(timenow, &prev, >)) {
+			atomic_fetch_add_explicit(
+				&thread->hist->total_starv_warn, 1,
+				memory_order_seq_cst);
+			if (!displayed && !thread->ignore_timer_late) {
+				flog_warn(
+					EC_LIB_STARVE_THREAD,
+					"Thread Starvation: %pTHD was scheduled to pop greater than 4s ago",
+					thread);
+				displayed = true;
+			}
+		}
+
+		event_timer_list_pop(&m->timer);
+		thread->type = EVENT_READY;
+		event_list_add_tail(&m->ready, thread);
+		ready++;
+	}
+
+	return ready;
+}
+
+/* process a list en masse, e.g. for event thread lists: drain 'list'
+ * into the owning loop's ready queue; returns the number moved.
+ */
+static unsigned int thread_process(struct event_list_head *list)
+{
+	struct event *thread;
+	unsigned int ready = 0;
+
+	while ((thread = event_list_pop(list))) {
+		thread->type = EVENT_READY;
+		event_list_add_tail(&thread->master->ready, thread);
+		ready++;
+	}
+	return ready;
+}
+
+
+/* Fetch next ready thread. Core scheduling loop: drain the ready queue
+ * first; otherwise promote events, compute the poll() timeout from the
+ * timer list, sleep in fd_poll(), then post expired timers and ready
+ * I/O. Returns NULL when nothing is left to run (time to exit) or on a
+ * fatal poll() error.
+ */
+struct event *event_fetch(struct event_loop *m, struct event *fetch)
+{
+	struct event *thread = NULL;
+	struct timeval now;
+	struct timeval zerotime = {0, 0};
+	struct timeval tv;
+	struct timeval *tw = NULL;
+	bool eintr_p = false;
+	int num = 0;
+
+	do {
+		/* Handle signals if any */
+		if (m->handle_signals)
+			frr_sigevent_process();
+
+		pthread_mutex_lock(&m->mtx);
+
+		/* Process any pending cancellation requests */
+		do_event_cancel(m);
+
+		/*
+		 * Attempt to flush ready queue before going into poll().
+		 * This is performance-critical. Think twice before modifying.
+		 */
+		if ((thread = event_list_pop(&m->ready))) {
+			fetch = thread_run(m, thread, fetch);
+			if (fetch->ref)
+				*fetch->ref = NULL;
+			pthread_mutex_unlock(&m->mtx);
+			if (!m->ready_run_loop)
+				GETRUSAGE(&m->last_getrusage);
+			m->ready_run_loop = true;
+			break;
+		}
+
+		m->ready_run_loop = false;
+		/* otherwise, tick through scheduling sequence */
+
+		/*
+		 * Post events to ready queue. This must come before the
+		 * following block since events should occur immediately
+		 */
+		thread_process(&m->event);
+
+		/*
+		 * If there are no tasks on the ready queue, we will poll()
+		 * until a timer expires or we receive I/O, whichever comes
+		 * first. The strategy for doing this is:
+		 *
+		 * - If there are events pending, set the poll() timeout to zero
+		 * - If there are no events pending, but there are timers
+		 * pending, set the timeout to the smallest remaining time on
+		 * any timer.
+		 * - If there are neither timers nor events pending, but there
+		 * are file descriptors pending, block indefinitely in poll()
+		 * - If nothing is pending, it's time for the application to die
+		 *
+		 * In every case except the last, we need to hit poll() at least
+		 * once per loop to avoid starvation by events
+		 */
+		if (!event_list_count(&m->ready))
+			tw = thread_timer_wait(&m->timer, &tv);
+
+		if (event_list_count(&m->ready) ||
+		    (tw && !timercmp(tw, &zerotime, >)))
+			tw = &zerotime;
+
+		if (!tw && m->handler.pfdcount == 0) { /* die */
+			pthread_mutex_unlock(&m->mtx);
+			fetch = NULL;
+			break;
+		}
+
+		/*
+		 * Copy pollfd array + # active pollfds in it. Not necessary to
+		 * copy the array size as this is fixed.
+		 */
+		m->handler.copycount = m->handler.pfdcount;
+		memcpy(m->handler.copy, m->handler.pfds,
+		       m->handler.copycount * sizeof(struct pollfd));
+
+		/* The loop mutex is released around the blocking poll so
+		 * other pthreads can schedule/cancel tasks meanwhile.
+		 */
+		pthread_mutex_unlock(&m->mtx);
+		{
+			eintr_p = false;
+			num = fd_poll(m, tw, &eintr_p);
+		}
+		pthread_mutex_lock(&m->mtx);
+
+		/* Handle any errors received in poll() */
+		if (num < 0) {
+			if (eintr_p) {
+				pthread_mutex_unlock(&m->mtx);
+				/* loop around to signal handler */
+				continue;
+			}
+
+			/* else die */
+			flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s",
+				 safe_strerror(errno));
+			pthread_mutex_unlock(&m->mtx);
+			fetch = NULL;
+			break;
+		}
+
+		/* Post timers to ready queue. */
+		monotime(&now);
+		thread_process_timers(m, &now);
+
+		/* Post I/O to ready queue. */
+		if (num > 0)
+			thread_process_io(m, num);
+
+		pthread_mutex_unlock(&m->mtx);
+
+	} while (!thread && m->spin);
+
+	return fetch;
+}
+
+/* Return (a - b) in microseconds; assumes a >= b. */
+static unsigned long timeval_elapsed(struct timeval a, struct timeval b)
+{
+	return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
+		+ (a.tv_usec - b.tv_usec));
+}
+
+unsigned long event_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
+ unsigned long *cputime)
+{
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+
+#ifdef __FreeBSD__
+ /*
+ * FreeBSD appears to have an issue when calling clock_gettime
+ * with CLOCK_THREAD_CPUTIME_ID really close to each other
+	 * occasionally the now time will be before the start time.
+ * This is not good and FRR is ending up with CPU HOG's
+ * when the subtraction wraps to very large numbers
+ *
+ * What we are going to do here is cheat a little bit
+ * and notice that this is a problem and just correct
+ * it so that it is impossible to happen
+ */
+ if (start->cpu.tv_sec == now->cpu.tv_sec &&
+ start->cpu.tv_nsec > now->cpu.tv_nsec)
+ now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
+ else if (start->cpu.tv_sec > now->cpu.tv_sec) {
+ now->cpu.tv_sec = start->cpu.tv_sec;
+ now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
+ }
+#endif
+ *cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
+ + (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
+#else
+ /* This is 'user + sys' time. */
+ *cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
+ + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
+#endif
+ return timeval_elapsed(now->real, start->real);
+}
+
+/*
+ * We should aim to yield after yield milliseconds, which defaults
+ * to EVENT_YIELD_TIME_SLOT.
+ * Note: we are using real (wall clock) time for this calculation.
+ * It could be argued that CPU time may make more sense in certain
+ * contexts. The things to consider are whether the thread may have
+ * blocked (in which case wall time increases, but CPU time does not),
+ * or whether the system is heavily loaded with other processes competing
+ * for CPU time. On balance, wall clock time seems to make sense.
+ * Plus it has the added benefit that gettimeofday should be faster
+ * than calling getrusage.
+ */
+int event_should_yield(struct event *thread)
+{
+ int result;
+
+ frr_with_mutex (&thread->mtx) {
+ result = monotime_since(&thread->real, NULL)
+ > (int64_t)thread->yield;
+ }
+ return result;
+}
+
+void event_set_yield_time(struct event *thread, unsigned long yield_time)
+{
+ frr_with_mutex (&thread->mtx) {
+ thread->yield = yield_time;
+ }
+}
+
+void event_getrusage(RUSAGE_T *r)
+{
+ monotime(&r->real);
+ if (!cputime_enabled) {
+ memset(&r->cpu, 0, sizeof(r->cpu));
+ return;
+ }
+
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+ /* not currently implemented in Linux's vDSO, but maybe at some point
+ * in the future?
+ */
+ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
+#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
+#if defined RUSAGE_THREAD
+#define FRR_RUSAGE RUSAGE_THREAD
+#else
+#define FRR_RUSAGE RUSAGE_SELF
+#endif
+ getrusage(FRR_RUSAGE, &(r->cpu));
+#endif
+}
+
+/*
+ * Call a thread.
+ *
+ * This function will atomically update the thread's usage history. At present
+ * this is the only spot where usage history is written. Nevertheless the code
+ * has been written such that the introduction of writers in the future should
+ * not need to update it provided the writers atomically perform only the
+ * operations done here, i.e. updating the total and maximum times. In
+ * particular, the maximum real and cpu times must be monotonically increasing
+ * or this code is not correct.
+ */
+void event_call(struct event *thread)
+{
+ RUSAGE_T before, after;
+
+ /* if the thread being called is the CLI, it may change cputime_enabled
+ * ("service cputime-stats" command), which can result in nonsensical
+ * and very confusing warnings
+ */
+ bool cputime_enabled_here = cputime_enabled;
+
+ if (thread->master->ready_run_loop)
+ before = thread->master->last_getrusage;
+ else
+ GETRUSAGE(&before);
+
+ thread->real = before.real;
+
+ frrtrace(9, frr_libfrr, event_call, thread->master,
+ thread->xref->funcname, thread->xref->xref.file,
+ thread->xref->xref.line, NULL, thread->u.fd, thread->u.val,
+ thread->arg, thread->u.sands.tv_sec);
+
+ pthread_setspecific(thread_current, thread);
+ (*thread->func)(thread);
+ pthread_setspecific(thread_current, NULL);
+
+ GETRUSAGE(&after);
+ thread->master->last_getrusage = after;
+
+ unsigned long walltime, cputime;
+ unsigned long exp;
+
+ walltime = event_consumed_time(&after, &before, &cputime);
+
+ /* update walltime */
+ atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
+ memory_order_seq_cst);
+ exp = atomic_load_explicit(&thread->hist->real.max,
+ memory_order_seq_cst);
+ while (exp < walltime
+ && !atomic_compare_exchange_weak_explicit(
+ &thread->hist->real.max, &exp, walltime,
+ memory_order_seq_cst, memory_order_seq_cst))
+ ;
+
+ if (cputime_enabled_here && cputime_enabled) {
+ /* update cputime */
+ atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
+ memory_order_seq_cst);
+ exp = atomic_load_explicit(&thread->hist->cpu.max,
+ memory_order_seq_cst);
+ while (exp < cputime
+ && !atomic_compare_exchange_weak_explicit(
+ &thread->hist->cpu.max, &exp, cputime,
+ memory_order_seq_cst, memory_order_seq_cst))
+ ;
+ }
+
+ atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
+ memory_order_seq_cst);
+ atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
+ memory_order_seq_cst);
+
+ if (cputime_enabled_here && cputime_enabled && cputime_threshold
+ && cputime > cputime_threshold) {
+ /*
+ * We have a CPU Hog on our hands. The time FRR has spent
+ * doing actual work (not sleeping) is greater than 5 seconds.
+ * Whinge about it now, so we're aware this is yet another task
+ * to fix.
+ */
+ atomic_fetch_add_explicit(&thread->hist->total_cpu_warn,
+ 1, memory_order_seq_cst);
+ flog_warn(
+ EC_LIB_SLOW_THREAD_CPU,
+ "CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
+ thread->xref->funcname, (unsigned long)thread->func,
+ walltime / 1000, cputime / 1000);
+
+ } else if (walltime_threshold && walltime > walltime_threshold) {
+ /*
+ * The runtime for a task is greater than 5 seconds, but the
+ * cpu time is under 5 seconds. Let's whine about this because
+ * this could imply some sort of scheduling issue.
+ */
+ atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
+ 1, memory_order_seq_cst);
+ flog_warn(
+ EC_LIB_SLOW_THREAD_WALL,
+ "STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
+ thread->xref->funcname, (unsigned long)thread->func,
+ walltime / 1000, cputime / 1000);
+ }
+}
+
+/* Execute thread */
+void _event_execute(const struct xref_eventsched *xref, struct event_loop *m,
+ void (*func)(struct event *), void *arg, int val)
+{
+ struct event *thread;
+
+ /* Get or allocate new thread to execute. */
+ frr_with_mutex (&m->mtx) {
+ thread = thread_get(m, EVENT_EVENT, func, arg, xref);
+
+ /* Set its event value. */
+ frr_with_mutex (&thread->mtx) {
+ thread->add_type = EVENT_EXECUTE;
+ thread->u.val = val;
+ thread->ref = &thread;
+ }
+ }
+
+ /* Execute thread doing all accounting. */
+ event_call(thread);
+
+ /* Give back or free thread. */
+ thread_add_unuse(m, thread);
+}
+
+/* Debug signal mask - if 'sigs' is NULL, use current effective mask. */
+void debug_signals(const sigset_t *sigs)
+{
+ int i, found;
+ sigset_t tmpsigs;
+ char buf[300];
+
+ /*
+ * We're only looking at the non-realtime signals here, so we need
+ * some limit value. Platform differences mean at some point we just
+ * need to pick a reasonable value.
+ */
+#if defined SIGRTMIN
+# define LAST_SIGNAL SIGRTMIN
+#else
+# define LAST_SIGNAL 32
+#endif
+
+
+ if (sigs == NULL) {
+ sigemptyset(&tmpsigs);
+ pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs);
+ sigs = &tmpsigs;
+ }
+
+ found = 0;
+ buf[0] = '\0';
+
+ for (i = 0; i < LAST_SIGNAL; i++) {
+ char tmp[20];
+
+ if (sigismember(sigs, i) > 0) {
+ if (found > 0)
+ strlcat(buf, ",", sizeof(buf));
+ snprintf(tmp, sizeof(tmp), "%d", i);
+ strlcat(buf, tmp, sizeof(buf));
+ found++;
+ }
+ }
+
+ if (found == 0)
+ snprintf(buf, sizeof(buf), "<none>");
+
+ zlog_debug("%s: %s", __func__, buf);
+}
+
+static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
+ const struct event *thread)
+{
+ static const char *const types[] = {
+ [EVENT_READ] = "read", [EVENT_WRITE] = "write",
+ [EVENT_TIMER] = "timer", [EVENT_EVENT] = "event",
+ [EVENT_READY] = "ready", [EVENT_UNUSED] = "unused",
+ [EVENT_EXECUTE] = "exec",
+ };
+ ssize_t rv = 0;
+ char info[16] = "";
+
+ if (!thread)
+ return bputs(buf, "{(thread *)NULL}");
+
+ rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);
+
+ if (thread->type < array_size(types) && types[thread->type])
+ rv += bprintfrr(buf, " %-6s", types[thread->type]);
+ else
+ rv += bprintfrr(buf, " INVALID(%u)", thread->type);
+
+ switch (thread->type) {
+ case EVENT_READ:
+ case EVENT_WRITE:
+ snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
+ break;
+
+ case EVENT_TIMER:
+ snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
+ break;
+ case EVENT_READY:
+ case EVENT_EVENT:
+ case EVENT_UNUSED:
+ case EVENT_EXECUTE:
+ break;
+ }
+
+ rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,
+ thread->xref->funcname, thread->xref->dest,
+ thread->xref->xref.file, thread->xref->xref.line);
+ return rv;
+}
+
+printfrr_ext_autoreg_p("TH", printfrr_thread);
+static ssize_t printfrr_thread(struct fbuf *buf, struct printfrr_eargs *ea,
+ const void *ptr)
+{
+ const struct event *thread = ptr;
+ struct timespec remain = {};
+
+ if (ea->fmt[0] == 'D') {
+ ea->fmt++;
+ return printfrr_thread_dbg(buf, ea, thread);
+ }
+
+ if (!thread) {
+ /* need to jump over time formatting flag characters in the
+ * input format string, i.e. adjust ea->fmt!
+ */
+ printfrr_time(buf, ea, &remain,
+ TIMEFMT_TIMER_DEADLINE | TIMEFMT_SKIP);
+ return bputch(buf, '-');
+ }
+
+ TIMEVAL_TO_TIMESPEC(&thread->u.sands, &remain);
+ return printfrr_time(buf, ea, &remain, TIMEFMT_TIMER_DEADLINE);
+}
void log_ref_fini(void)
{
frr_with_mutex (&refs_mtx) {
- hash_clean(refs, NULL);
- hash_free(refs);
- refs = NULL;
+ hash_clean_and_free(&refs, NULL);
}
}
/* initialize mutex */
pthread_mutex_init(&fpt->mtx, NULL);
/* create new thread master */
- fpt->master = thread_master_create(name);
+ fpt->master = event_master_create(name);
/* set attributes */
fpt->attr = *attr;
name = (name ? name : "Anonymous thread");
static void frr_pthread_destroy_nolock(struct frr_pthread *fpt)
{
- thread_master_free(fpt->master);
+ event_master_free(fpt->master);
pthread_mutex_destroy(&fpt->mtx);
pthread_mutex_destroy(fpt->running_cond_mtx);
pthread_cond_destroy(fpt->running_cond);
*/
/* dummy task for sleeper pipe */
-static void fpt_dummy(struct thread *thread)
+static void fpt_dummy(struct event *thread)
{
}
/* poison pill task to end event loop */
-static void fpt_finish(struct thread *thread)
+static void fpt_finish(struct event *thread)
{
- struct frr_pthread *fpt = THREAD_ARG(thread);
+ struct frr_pthread *fpt = EVENT_ARG(thread);
atomic_store_explicit(&fpt->running, false, memory_order_relaxed);
}
/* stop function, called from other threads to halt this one */
static int fpt_halt(struct frr_pthread *fpt, void **res)
{
- thread_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
+ event_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
pthread_join(fpt->thread, res);
return 0;
int sleeper[2];
pipe(sleeper);
- thread_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);
+ event_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);
fpt->master->handle_signals = false;
frr_pthread_notify_running(fpt);
- struct thread task;
+ struct event task;
while (atomic_load_explicit(&fpt->running, memory_order_relaxed)) {
pthread_testcancel();
- if (thread_fetch(fpt->master, &task)) {
- thread_call(&task);
+ if (event_fetch(fpt->master, &task)) {
+ event_call(&task);
}
}
#include "frratomic.h"
#include "memory.h"
#include "frrcu.h"
-#include "thread.h"
+#include "frrevent.h"
#ifdef __cplusplus
extern "C" {
struct rcu_thread *rcu_thread;
/* thread master for this pthread's thread.c event loop */
- struct thread_master *master;
+ struct event_loop *master;
/* caller-specified data; start & stop funcs, name, id */
struct frr_pthread_attr attr;
#include <zebra.h>
#include <zmq.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "frr_zmq.h"
#include "log.h"
}
}
-static void frrzmq_read_msg(struct thread *t)
+static void frrzmq_read_msg(struct event *t)
{
- struct frrzmq_cb **cbp = THREAD_ARG(t);
+ struct frrzmq_cb **cbp = EVENT_ARG(t);
struct frrzmq_cb *cb;
zmq_msg_t msg;
unsigned partno;
if (read)
frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT);
- thread_add_read(t->master, frrzmq_read_msg, cbp,
- cb->fd, &cb->read.thread);
+ event_add_read(t->master, frrzmq_read_msg, cbp, cb->fd,
+ &cb->read.thread);
return;
out_err:
cb->read.cb_error(cb->read.arg, cb->zmqsock);
}
-int _frrzmq_thread_add_read(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*partfunc)(void *arg, void *zmqsock,
- zmq_msg_t *msg, unsigned partnum),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock,
- struct frrzmq_cb **cbp)
+int _frrzmq_event_add_read(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*partfunc)(void *arg, void *zmqsock,
+ zmq_msg_t *msg, unsigned partnum),
+ void (*errfunc)(void *arg, void *zmqsock), void *arg,
+ void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
cb->in_cb = false;
if (events & ZMQ_POLLIN) {
- thread_cancel(&cb->read.thread);
+ event_cancel(&cb->read.thread);
- thread_add_event(master, frrzmq_read_msg, cbp, fd,
- &cb->read.thread);
- } else
- thread_add_read(master, frrzmq_read_msg, cbp, fd,
+ event_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
+ } else
+ event_add_read(master, frrzmq_read_msg, cbp, fd,
+ &cb->read.thread);
return 0;
}
-static void frrzmq_write_msg(struct thread *t)
+static void frrzmq_write_msg(struct event *t)
{
- struct frrzmq_cb **cbp = THREAD_ARG(t);
+ struct frrzmq_cb **cbp = EVENT_ARG(t);
struct frrzmq_cb *cb;
unsigned char written = 0;
int ret;
if (written)
frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN);
- thread_add_write(t->master, frrzmq_write_msg, cbp,
- cb->fd, &cb->write.thread);
+ event_add_write(t->master, frrzmq_write_msg, cbp, cb->fd,
+ &cb->write.thread);
return;
out_err:
cb->write.cb_error(cb->write.arg, cb->zmqsock);
}
-int _frrzmq_thread_add_write(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock, struct frrzmq_cb **cbp)
+int _frrzmq_event_add_write(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
cb->in_cb = false;
if (events & ZMQ_POLLOUT) {
- thread_cancel(&cb->write.thread);
+ event_cancel(&cb->write.thread);
- _thread_add_event(xref, master, frrzmq_write_msg, cbp, fd,
- &cb->write.thread);
- } else
- thread_add_write(master, frrzmq_write_msg, cbp, fd,
+ _event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
+ } else
+ event_add_write(master, frrzmq_write_msg, cbp, fd,
+ &cb->write.thread);
return 0;
}
if (!cb || !*cb)
return;
core->cancelled = true;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
/* If cancelled from within a callback, don't try to free memory
* in this path.
if (zmq_getsockopt(cb->zmqsock, ZMQ_EVENTS, &events, &len))
return;
if ((events & event) && core->thread && !core->cancelled) {
- struct thread_master *tm = core->thread->master;
+ struct event_loop *tm = core->thread->master;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
if (event == ZMQ_POLLIN)
- thread_add_event(tm, frrzmq_read_msg,
- cbp, cb->fd, &core->thread);
+ event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
+ &core->thread);
else
- thread_add_event(tm, frrzmq_write_msg,
- cbp, cb->fd, &core->thread);
+ event_add_event(tm, frrzmq_write_msg, cbp, cb->fd,
+ &core->thread);
}
}
#ifndef _FRRZMQ_H
#define _FRRZMQ_H
-#include "thread.h"
+#include "frrevent.h"
#include <zmq.h>
#ifdef __cplusplus
/* callback integration */
struct cb_core {
- struct thread *thread;
+ struct event *thread;
void *arg;
bool cancelled;
#define _xref_zmq_a(type, f, d, call) \
({ \
- static const struct xref_threadsched _xref \
- __attribute__((used)) = { \
- .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
+ static const struct xref_eventsched _xref __attribute__( \
+ (used)) = { \
+ .xref = XREF_INIT(XREFT_EVENTSCHED, NULL, __func__), \
.funcname = #f, \
.dest = #d, \
- .thread_type = THREAD_ ## type, \
+ .event_type = EVENT_##type, \
}; \
XREF_LINK(_xref.xref); \
call; \
- }) \
- /* end */
+ }) /* end */
/* core event registration, one of these 2 macros should be used */
-#define frrzmq_thread_add_read_msg(m, f, e, a, z, d) \
+#define frrzmq_event_add_read_msg(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
- _frrzmq_thread_add_read(&_xref, m, f, NULL, e, a, z, d))
+ _frrzmq_event_add_read(&_xref, m, f, NULL, e, a, z, d))
-#define frrzmq_thread_add_read_part(m, f, e, a, z, d) \
+#define frrzmq_event_add_read_part(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
- _frrzmq_thread_add_read(&_xref, m, NULL, f, e, a, z, d))
+ _frrzmq_event_add_read(&_xref, m, NULL, f, e, a, z, d))
-#define frrzmq_thread_add_write_msg(m, f, e, a, z, d) \
+#define frrzmq_event_add_write_msg(m, f, e, a, z, d) \
_xref_zmq_a(WRITE, f, d, \
- _frrzmq_thread_add_write(&_xref, m, f, e, a, z, d))
+ _frrzmq_event_add_write(&_xref, m, f, e, a, z, d))
struct cb_core;
struct frrzmq_cb;
* may schedule the event to run as soon as libfrr is back in its main
* loop.
*/
-extern int _frrzmq_thread_add_read(
- const struct xref_threadsched *xref, struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*partfunc)(void *arg, void *zmqsock, zmq_msg_t *msg,
- unsigned partnum),
- void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb);
-extern int _frrzmq_thread_add_write(
- const struct xref_threadsched *xref, struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb);
+extern int
+_frrzmq_event_add_read(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*partfunc)(void *arg, void *zmqsock,
+ zmq_msg_t *msg, unsigned partnum),
+ void (*errfunc)(void *arg, void *zmqsock), void *arg,
+ void *zmqsock, struct frrzmq_cb **cb);
+extern int _frrzmq_event_add_write(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock,
+ struct frrzmq_cb **cb);
extern void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Event management routine header.
+ * Copyright (C) 1998 Kunihiro Ishiguro
+ */
+
+#ifndef _ZEBRA_THREAD_H
+#define _ZEBRA_THREAD_H
+
+#include <zebra.h>
+#include <pthread.h>
+#include <poll.h>
+#include "monotime.h"
+#include "frratomic.h"
+#include "typesafe.h"
+#include "xref.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern bool cputime_enabled;
+extern unsigned long cputime_threshold;
+/* capturing wallclock time is always enabled since it is fast (reading
+ * hardware TSC w/o syscalls)
+ */
+extern unsigned long walltime_threshold;
+
+struct rusage_t {
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+ struct timespec cpu;
+#else
+ struct rusage cpu;
+#endif
+ struct timeval real;
+};
+#define RUSAGE_T struct rusage_t
+
+#define GETRUSAGE(X) event_getrusage(X)
+
+PREDECL_LIST(event_list);
+PREDECL_HEAP(event_timer_list);
+
+struct fd_handler {
+ /* number of pfd that fit in the allocated space of pfds. This is a
+ * constant and is the same for both pfds and copy.
+ */
+ nfds_t pfdsize;
+
+ /* file descriptors to monitor for i/o */
+ struct pollfd *pfds;
+ /* number of pollfds stored in pfds */
+ nfds_t pfdcount;
+
+ /* chunk used for temp copy of pollfds */
+ struct pollfd *copy;
+ /* number of pollfds stored in copy */
+ nfds_t copycount;
+};
+
+struct xref_eventsched {
+ struct xref xref;
+
+ const char *funcname;
+ const char *dest;
+ uint32_t event_type;
+};
+
+/* Master of the threads. */
+struct event_loop {
+ char *name;
+
+ struct event **read;
+ struct event **write;
+ struct event_timer_list_head timer;
+ struct event_list_head event, ready, unuse;
+ struct list *cancel_req;
+ bool canceled;
+ pthread_cond_t cancel_cond;
+ struct hash *cpu_record;
+ int io_pipe[2];
+ int fd_limit;
+ struct fd_handler handler;
+ unsigned long alloc;
+ long selectpoll_timeout;
+ bool spin;
+ bool handle_signals;
+ pthread_mutex_t mtx;
+ pthread_t owner;
+
+ bool ready_run_loop;
+ RUSAGE_T last_getrusage;
+};
+
+/* Event types. */
+enum event_types {
+ EVENT_READ,
+ EVENT_WRITE,
+ EVENT_TIMER,
+ EVENT_EVENT,
+ EVENT_READY,
+ EVENT_UNUSED,
+ EVENT_EXECUTE,
+};
+
+/* Event itself. */
+struct event {
+ enum event_types type; /* event type */
+ enum event_types add_type; /* event type */
+ struct event_list_item eventitem;
+ struct event_timer_list_item timeritem;
+ struct event **ref; /* external reference (if given) */
+ struct event_loop *master; /* pointer to the struct event_loop */
+ void (*func)(struct event *e); /* event function */
+ void *arg; /* event argument */
+ union {
+ int val; /* second argument of the event. */
+ int fd; /* file descriptor in case of r/w */
+ struct timeval sands; /* rest of time sands value. */
+ } u;
+ struct timeval real;
+ struct cpu_event_history *hist; /* cache pointer to cpu_history */
+ unsigned long yield; /* yield time in microseconds */
+ const struct xref_eventsched *xref; /* origin location */
+ pthread_mutex_t mtx; /* mutex for thread.c functions */
+ bool ignore_timer_late;
+};
+
+#ifdef _FRR_ATTRIBUTE_PRINTFRR
+#pragma FRR printfrr_ext "%pTH"(struct event *)
+#endif
+
+struct cpu_event_history {
+ void (*func)(struct event *e);
+ atomic_size_t total_cpu_warn;
+ atomic_size_t total_wall_warn;
+ atomic_size_t total_starv_warn;
+ atomic_size_t total_calls;
+ atomic_size_t total_active;
+ struct time_stats {
+ atomic_size_t total, max;
+ } real;
+ struct time_stats cpu;
+ atomic_uint_fast32_t types;
+ const char *funcname;
+};
+
+/* Struct timeval's tv_usec one second value. */
+#define TIMER_SECOND_MICRO 1000000L
+
+/* Event yield time. */
+#define EVENT_YIELD_TIME_SLOT 10 * 1000L /* 10ms */
+
+#define EVENT_TIMER_STRLEN 12
+
+/* Macros. */
+#define EVENT_ARG(X) ((X)->arg)
+#define EVENT_FD(X) ((X)->u.fd)
+#define EVENT_VAL(X) ((X)->u.val)
+
+/*
+ * Please consider this macro deprecated, and do not use it in new code.
+ */
+#define EVENT_OFF(thread) \
+ do { \
+ if ((thread)) \
+ event_cancel(&(thread)); \
+ } while (0)
+
+/*
+ * Macro wrappers to generate xrefs for all thread add calls. Includes
+ * file/line/function info for debugging/tracing.
+ */
+#include "lib/xref.h"
+
+#define _xref_t_a(addfn, type, m, f, a, v, t) \
+ ({ \
+ static const struct xref_eventsched _xref __attribute__( \
+ (used)) = { \
+ .xref = XREF_INIT(XREFT_EVENTSCHED, NULL, __func__), \
+ .funcname = #f, \
+ .dest = #t, \
+ .event_type = EVENT_##type, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ _event_add_##addfn(&_xref, m, f, a, v, t); \
+ }) /* end */
+
+#define event_add_read(m, f, a, v, t) _xref_t_a(read_write, READ, m, f, a, v, t)
+#define event_add_write(m, f, a, v, t) \
+ _xref_t_a(read_write, WRITE, m, f, a, v, t)
+#define event_add_timer(m, f, a, v, t) _xref_t_a(timer, TIMER, m, f, a, v, t)
+#define event_add_timer_msec(m, f, a, v, t) \
+ _xref_t_a(timer_msec, TIMER, m, f, a, v, t)
+#define event_add_timer_tv(m, f, a, v, t) \
+ _xref_t_a(timer_tv, TIMER, m, f, a, v, t)
+#define event_add_event(m, f, a, v, t) _xref_t_a(event, EVENT, m, f, a, v, t)
+
+#define event_execute(m, f, a, v) \
+ ({ \
+ static const struct xref_eventsched _xref __attribute__( \
+ (used)) = { \
+ .xref = XREF_INIT(XREFT_EVENTSCHED, NULL, __func__), \
+ .funcname = #f, \
+ .dest = NULL, \
+ .event_type = EVENT_EXECUTE, \
+ }; \
+ XREF_LINK(_xref.xref); \
+ _event_execute(&_xref, m, f, a, v); \
+ }) /* end */
+
+/* Prototypes. */
+extern struct event_loop *event_master_create(const char *name);
+void event_master_set_name(struct event_loop *master, const char *name);
+extern void event_master_free(struct event_loop *m);
+extern void event_master_free_unused(struct event_loop *m);
+
+extern void _event_add_read_write(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg, int fd,
+ struct event **tref);
+
+extern void _event_add_timer(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg, long t,
+ struct event **tref);
+
+extern void _event_add_timer_msec(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg, long t,
+ struct event **tref);
+
+extern void _event_add_timer_tv(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg,
+ struct timeval *tv, struct event **tref);
+
+extern void _event_add_event(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg, int val,
+ struct event **tref);
+
+extern void _event_execute(const struct xref_eventsched *xref,
+ struct event_loop *master,
+ void (*fn)(struct event *), void *arg, int val);
+
+extern void event_cancel(struct event **event);
+extern void event_cancel_async(struct event_loop *m, struct event **eptr,
+ void *data);
+/* Cancel ready tasks with an arg matching 'arg' */
+extern void event_cancel_event_ready(struct event_loop *m, void *arg);
+/* Cancel all tasks with an arg matching 'arg', including timers and io */
+extern void event_cancel_event(struct event_loop *m, void *arg);
+extern struct event *event_fetch(struct event_loop *m, struct event *event);
+extern void event_call(struct event *event);
+extern unsigned long event_timer_remain_second(struct event *event);
+extern struct timeval event_timer_remain(struct event *event);
+extern unsigned long event_timer_remain_msec(struct event *event);
+extern int event_should_yield(struct event *event);
+/* set yield time for thread */
+extern void event_set_yield_time(struct event *event, unsigned long ytime);
+
+/* Internal libfrr exports */
+extern void event_getrusage(RUSAGE_T *r);
+extern void event_cmd_init(void);
+
+/* Returns elapsed real (wall clock) time. */
+extern unsigned long event_consumed_time(RUSAGE_T *after, RUSAGE_T *before,
+ unsigned long *cpu_time_elapsed);
+
+/* only for use in logging functions! */
+extern pthread_key_t thread_current;
+extern char *event_timer_to_hhmmss(char *buf, int buf_size,
+ struct event *t_timer);
+
+static inline bool event_is_scheduled(struct event *thread)
+{
+ if (thread)
+ return true;
+
+ return false;
+}
+
+/* Debug signal mask */
+void debug_signals(const sigset_t *sigs);
+
+static inline void event_ignore_late_timer(struct event *event)
+{
+ event->ignore_timer_late = true;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZEBRA_THREAD_H */
void frrscript_delete(struct frrscript *fs)
{
- hash_clean(fs->lua_function_hash, lua_function_free);
- hash_free(fs->lua_function_hash);
+ hash_clean_and_free(&fs->lua_function_hash, lua_function_free);
XFREE(MTYPE_SCRIPT, fs->name);
XFREE(MTYPE_SCRIPT, fs);
}
void frrscript_fini(void)
{
- hash_clean(codec_hash, codec_free);
- hash_free(codec_hash);
+ hash_clean_and_free(&codec_hash, codec_free);
frrscript_names_destroy();
}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Getopt for GNU.
- * NOTE: getopt is now part of the C library, so if you don't know what
- * "Keep this file name-space clean" means, talk to drepper@gnu.org
- * before changing it!
- *
- * Copyright (C) 1987, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98
- * Free Software Foundation, Inc.
- *
- * NOTE: The canonical source of this file is maintained with the GNU C Library.
- * Bugs can be reported to bug-glibc@gnu.org.
- */
-
-/* This tells Alpha OSF/1 not to define a getopt prototype in <stdio.h>.
- Ditto for AIX 3.2 and <stdlib.h>. */
-#ifndef _NO_PROTO
-# define _NO_PROTO
-#endif
-
-#include <zebra.h>
-
-#if !defined __STDC__ || !__STDC__
-/* This is a separate conditional since some stdc systems
- reject `defined (const)'. */
-#ifndef const
-# define const
-#endif
-#endif
-
-#include <stdio.h>
-
-/* Comment out all this code if we are using the GNU C Library, and are not
- actually compiling the library itself. This code is part of the GNU C
- Library, but also included in many other GNU distributions. Compiling
- and linking in this code is a waste when using the GNU C library
- (especially if it is a shared library). Rather than having every GNU
- program understand `configure --with-gnu-libc' and omit the object files,
- it is simpler to just do this in the source for each such file. */
-
-#define GETOPT_INTERFACE_VERSION 2
-#if !defined _LIBC && defined __GLIBC__ && __GLIBC__ >= 2
-#include <gnu-versions.h>
-#if _GNU_GETOPT_INTERFACE_VERSION == GETOPT_INTERFACE_VERSION
-# define ELIDE_CODE
-#endif
-#endif
-
-#ifndef ELIDE_CODE
-
-
-/* This needs to come after some library #include
- to get __GNU_LIBRARY__ defined. */
-#ifdef __GNU_LIBRARY__
-/* Don't include stdlib.h for non-GNU C libraries because some of them
- contain conflicting prototypes for getopt. */
-#include <stdlib.h>
-#include <unistd.h>
-#endif /* GNU C library. */
-
-#ifdef VMS
-#include <unixlib.h>
-#if HAVE_STRING_H - 0
-#include <string.h>
-#endif
-#endif
-
-#ifndef _
-/* This is for other GNU distributions with internationalized messages.
- When compiling libc, the _ macro is predefined. */
-#ifdef HAVE_LIBINTL_H
-#include <libintl.h>
-# define _(msgid) gettext (msgid)
-#else
-# define _(msgid) (msgid)
-#endif
-#endif
-
-/* This version of `getopt' appears to the caller like standard Unix `getopt'
- but it behaves differently for the user, since it allows the user
- to intersperse the options with the other arguments.
-
- As `getopt' works, it permutes the elements of ARGV so that,
- when it is done, all the options precede everything else. Thus
- all application programs are extended to handle flexible argument order.
-
- Setting the environment variable POSIXLY_CORRECT disables permutation.
- Then the behavior is completely standard.
-
- GNU application programs can use a third alternative mode in which
- they can distinguish the relative order of options and other arguments. */
-
-#include "getopt.h"
-
-/* For communication from `getopt' to the caller.
- When `getopt' finds an option that takes an argument,
- the argument value is returned here.
- Also, when `ordering' is RETURN_IN_ORDER,
- each non-option ARGV-element is returned here. */
-
-char *optarg = NULL;
-
-/* Index in ARGV of the next element to be scanned.
- This is used for communication to and from the caller
- and for communication between successive calls to `getopt'.
-
- On entry to `getopt', zero means this is the first call; initialize.
-
- When `getopt' returns -1, this is the index of the first of the
- non-option elements that the caller should itself scan.
-
- Otherwise, `optind' communicates from one call to the next
- how much of ARGV has been scanned so far. */
-
-/* 1003.2 says this must be 1 before any call. */
-int optind = 1;
-
-/* Formerly, initialization of getopt depended on optind==0, which
- causes problems with re-calling getopt as programs generally don't
- know that. */
-
-int __getopt_initialized = 0;
-
-/* The next char to be scanned in the option-element
- in which the last option character we returned was found.
- This allows us to pick up the scan where we left off.
-
- If this is zero, or a null string, it means resume the scan
- by advancing to the next ARGV-element. */
-
-static char *nextchar;
-
-/* Callers store zero here to inhibit the error message
- for unrecognized options. */
-
-int opterr = 1;
-
-/* Set to an option character which was unrecognized.
- This must be initialized on some systems to avoid linking in the
- system's own getopt implementation. */
-
-int optopt = '?';
-
-/* Describe how to deal with options that follow non-option ARGV-elements.
-
- If the caller did not specify anything,
- the default is REQUIRE_ORDER if the environment variable
- POSIXLY_CORRECT is defined, PERMUTE otherwise.
-
- REQUIRE_ORDER means don't recognize them as options;
- stop option processing when the first non-option is seen.
- This is what Unix does.
- This mode of operation is selected by either setting the environment
- variable POSIXLY_CORRECT, or using `+' as the first character
- of the list of option characters.
-
- PERMUTE is the default. We permute the contents of ARGV as we scan,
- so that eventually all the non-options are at the end. This allows options
- to be given in any order, even with programs that were not written to
- expect this.
-
- RETURN_IN_ORDER is an option available to programs that were written
- to expect options and other ARGV-elements in any order and that care about
- the ordering of the two. We describe each non-option ARGV-element
- as if it were the argument of an option with character code 1.
- Using `-' as the first character of the list of option characters
- selects this mode of operation.
-
- The special argument `--' forces an end of option-scanning regardless
- of the value of `ordering'. In the case of RETURN_IN_ORDER, only
- `--' can cause `getopt' to return -1 with `optind' != ARGC. */
-
-static enum { REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER } ordering;
-
-/* Value of POSIXLY_CORRECT environment variable. */
-static char *posixly_correct;
-
-#ifdef __GNU_LIBRARY__
-/* We want to avoid inclusion of string.h with non-GNU libraries
- because there are many ways it can cause trouble.
- On some systems, it contains special magic macros that don't work
- in GCC. */
-#include <string.h>
-# define my_index strchr
-#else
-
-#if HAVE_STRING_H
-#include <string.h>
-#else
-#include <strings.h>
-#endif
-
-/* Avoid depending on library functions or files
- whose names are inconsistent. */
-
-#ifndef getenv
-extern char *getenv(const char *);
-#endif
-
-static char *my_index(const char *str, int chr)
-{
- while (*str) {
- if (*str == chr)
- return (char *)str;
- str++;
- }
- return 0;
-}
-
-/* If using GCC, we can safely declare strlen this way.
- If not using GCC, it is ok not to declare it. */
-#ifdef __GNUC__
-/* Note that Motorola Delta 68k R3V7 comes with GCC but not stddef.h.
- That was relevant to code that was here before. */
-#if (!defined __STDC__ || !__STDC__) && !defined strlen
-/* gcc with -traditional declares the built-in strlen to return int,
- and has done so at least since version 2.4.5. -- rms. */
-extern int strlen(const char *);
-#endif /* not __STDC__ */
-#endif /* __GNUC__ */
-
-#endif /* not __GNU_LIBRARY__ */
-
-/* Handle permutation of arguments. */
-
-/* Describe the part of ARGV that contains non-options that have
- been skipped. `first_nonopt' is the index in ARGV of the first of them;
- `last_nonopt' is the index after the last of them. */
-
-static int first_nonopt;
-static int last_nonopt;
-
-#ifdef _LIBC
-/* Bash 2.0 gives us an environment variable containing flags
- indicating ARGV elements that should not be considered arguments. */
-
-/* Defined in getopt_init.c */
-extern char *__getopt_nonoption_flags;
-
-static int nonoption_flags_max_len;
-static int nonoption_flags_len;
-
-static int original_argc;
-static char *const *original_argv;
-
-/* Make sure the environment variable bash 2.0 puts in the environment
- is valid for the getopt call we must make sure that the ARGV passed
- to getopt is that one passed to the process. */
-static void __attribute__((unused))
-store_args_and_env(int argc, char *const *argv)
-{
- /* XXX This is no good solution. We should rather copy the args so
- that we can compare them later. But we must not use malloc(3). */
- original_argc = argc;
- original_argv = argv;
-}
-#ifdef text_set_element
-text_set_element(__libc_subinit, store_args_and_env);
-#endif /* text_set_element */
-
-#define SWAP_FLAGS(ch1, ch2) \
- if (nonoption_flags_len > 0) { \
- char __tmp = __getopt_nonoption_flags[ch1]; \
- __getopt_nonoption_flags[ch1] = __getopt_nonoption_flags[ch2]; \
- __getopt_nonoption_flags[ch2] = __tmp; \
- }
-#else /* !_LIBC */
-# define SWAP_FLAGS(ch1, ch2)
-#endif /* _LIBC */
-
-/* Exchange two adjacent subsequences of ARGV.
- One subsequence is elements [first_nonopt,last_nonopt)
- which contains all the non-options that have been skipped so far.
- The other is elements [last_nonopt,optind), which contains all
- the options processed since those non-options were skipped.
-
- `first_nonopt' and `last_nonopt' are relocated so that they describe
- the new indices of the non-options in ARGV after they are moved. */
-
-#if defined __STDC__ && __STDC__
-static void exchange(char **);
-#endif
-
-static void exchange(argv) char **argv;
-{
- int bottom = first_nonopt;
- int middle = last_nonopt;
- int top = optind;
- char *tem;
-
-/* Exchange the shorter segment with the far end of the longer segment.
- That puts the shorter segment into the right place.
- It leaves the longer segment in the right place overall,
- but it consists of two parts that need to be swapped next. */
-
-#ifdef _LIBC
- /* First make sure the handling of the `__getopt_nonoption_flags'
- string can work normally. Our top argument must be in the range
- of the string. */
- if (nonoption_flags_len > 0 && top >= nonoption_flags_max_len) {
- /* We must extend the array. The user plays games with us and
- presents new arguments. */
- char *new_str = malloc(top + 1);
- if (new_str == NULL)
- nonoption_flags_len = nonoption_flags_max_len = 0;
- else {
- memset(__mempcpy(new_str, __getopt_nonoption_flags,
- nonoption_flags_max_len),
- '\0', top + 1 - nonoption_flags_max_len);
- nonoption_flags_max_len = top + 1;
- __getopt_nonoption_flags = new_str;
- }
- }
-#endif
-
- while (top > middle && middle > bottom) {
- if (top - middle > middle - bottom) {
- /* Bottom segment is the short one. */
- int len = middle - bottom;
- register int i;
-
- /* Swap it with the top part of the top segment. */
- for (i = 0; i < len; i++) {
- tem = argv[bottom + i];
- argv[bottom + i] =
- argv[top - (middle - bottom) + i];
- argv[top - (middle - bottom) + i] = tem;
- SWAP_FLAGS(bottom + i,
- top - (middle - bottom) + i);
- }
- /* Exclude the moved bottom segment from further
- * swapping. */
- top -= len;
- } else {
- /* Top segment is the short one. */
- int len = top - middle;
- register int i;
-
- /* Swap it with the bottom part of the bottom segment.
- */
- for (i = 0; i < len; i++) {
- tem = argv[bottom + i];
- argv[bottom + i] = argv[middle + i];
- argv[middle + i] = tem;
- SWAP_FLAGS(bottom + i, middle + i);
- }
- /* Exclude the moved top segment from further swapping.
- */
- bottom += len;
- }
- }
-
- /* Update records for the slots the non-options now occupy. */
-
- first_nonopt += (optind - last_nonopt);
- last_nonopt = optind;
-}
-
-/* Initialize the internal data when the first call is made. */
-
-#if defined __STDC__ && __STDC__
-static const char *_getopt_initialize(int, char *const *, const char *);
-#endif
-static const char *_getopt_initialize(argc, argv, optstring) int argc;
-char *const *argv;
-const char *optstring;
-{
- /* Start processing options with ARGV-element 1 (since ARGV-element 0
- is the program name); the sequence of previously skipped
- non-option ARGV-elements is empty. */
-
- first_nonopt = last_nonopt = optind;
-
- nextchar = NULL;
-
- posixly_correct = getenv("POSIXLY_CORRECT");
-
- /* Determine how to handle the ordering of options and nonoptions. */
-
- if (optstring[0] == '-') {
- ordering = RETURN_IN_ORDER;
- ++optstring;
- } else if (optstring[0] == '+') {
- ordering = REQUIRE_ORDER;
- ++optstring;
- } else if (posixly_correct != NULL)
- ordering = REQUIRE_ORDER;
- else
- ordering = PERMUTE;
-
-#ifdef _LIBC
- if (posixly_correct == NULL && argc == original_argc
- && argv == original_argv) {
- if (nonoption_flags_max_len == 0) {
- if (__getopt_nonoption_flags == NULL
- || __getopt_nonoption_flags[0] == '\0')
- nonoption_flags_max_len = -1;
- else {
- const char *orig_str = __getopt_nonoption_flags;
- int len = nonoption_flags_max_len =
- strlen(orig_str);
- if (nonoption_flags_max_len < argc)
- nonoption_flags_max_len = argc;
- __getopt_nonoption_flags =
- (char *)malloc(nonoption_flags_max_len);
- if (__getopt_nonoption_flags == NULL)
- nonoption_flags_max_len = -1;
- else
- memset(__mempcpy(
- __getopt_nonoption_flags,
- orig_str, len),
- '\0',
- nonoption_flags_max_len - len);
- }
- }
- nonoption_flags_len = nonoption_flags_max_len;
- } else
- nonoption_flags_len = 0;
-#endif
-
- return optstring;
-}
-
-/* Scan elements of ARGV (whose length is ARGC) for option characters
- given in OPTSTRING.
-
- If an element of ARGV starts with '-', and is not exactly "-" or "--",
- then it is an option element. The characters of this element
- (aside from the initial '-') are option characters. If `getopt'
- is called repeatedly, it returns successively each of the option characters
- from each of the option elements.
-
- If `getopt' finds another option character, it returns that character,
- updating `optind' and `nextchar' so that the next call to `getopt' can
- resume the scan with the following option character or ARGV-element.
-
- If there are no more option characters, `getopt' returns -1.
- Then `optind' is the index in ARGV of the first ARGV-element
- that is not an option. (The ARGV-elements have been permuted
- so that those that are not options now come last.)
-
- OPTSTRING is a string containing the legitimate option characters.
- If an option character is seen that is not listed in OPTSTRING,
- return '?' after printing an error message. If you set `opterr' to
- zero, the error message is suppressed but we still return '?'.
-
- If a char in OPTSTRING is followed by a colon, that means it wants an arg,
- so the following text in the same ARGV-element, or the text of the following
- ARGV-element, is returned in `optarg'. Two colons mean an option that
- wants an optional arg; if there is text in the current ARGV-element,
- it is returned in `optarg', otherwise `optarg' is set to zero.
-
- If OPTSTRING starts with `-' or `+', it requests different methods of
- handling the non-option ARGV-elements.
- See the comments about RETURN_IN_ORDER and REQUIRE_ORDER, above.
-
- Long-named options begin with `--' instead of `-'.
- Their names may be abbreviated as long as the abbreviation is unique
- or is an exact match for some defined option. If they have an
- argument, it follows the option name in the same ARGV-element, separated
- from the option name by a `=', or else the in next ARGV-element.
- When `getopt' finds a long-named option, it returns 0 if that option's
- `flag' field is nonzero, the value of the option's `val' field
- if the `flag' field is zero.
-
- The elements of ARGV aren't really const, because we permute them.
- But we pretend they're const in the prototype to be compatible
- with other systems.
-
- LONGOPTS is a vector of `struct option' terminated by an
- element containing a name which is zero.
-
- LONGIND returns the index in LONGOPT of the long-named option found.
- It is only valid when a long-named option has been found by the most
- recent call.
-
- If LONG_ONLY is nonzero, '-' as well as '--' can introduce
- long-named options. */
-
-int _getopt_internal(argc, argv, optstring, longopts, longind,
- long_only) int argc;
-char *const *argv;
-const char *optstring;
-const struct option *longopts;
-int *longind;
-int long_only;
-{
- optarg = NULL;
-
- if (optind == 0 || !__getopt_initialized) {
- if (optind == 0)
- optind = 1; /* Don't scan ARGV[0], the program name. */
- optstring = _getopt_initialize(argc, argv, optstring);
- __getopt_initialized = 1;
- }
-
-/* Test whether ARGV[optind] points to a non-option argument.
- Either it does not have option syntax, or there is an environment flag
- from the shell indicating it is not an option. The later information
- is only used when the used in the GNU libc. */
-#ifdef _LIBC
-#define NONOPTION_P \
- (argv[optind][0] != '-' || argv[optind][1] == '\0' \
- || (optind < nonoption_flags_len \
- && __getopt_nonoption_flags[optind] == '1'))
-#else
-# define NONOPTION_P (argv[optind][0] != '-' || argv[optind][1] == '\0')
-#endif
-
- if (nextchar == NULL || *nextchar == '\0') {
- /* Advance to the next ARGV-element. */
-
- /* Give FIRST_NONOPT & LAST_NONOPT rational values if OPTIND has
- been
- moved back by the user (who may also have changed the
- arguments). */
- if (last_nonopt > optind)
- last_nonopt = optind;
- if (first_nonopt > optind)
- first_nonopt = optind;
-
- if (ordering == PERMUTE) {
- /* If we have just processed some options following some
- non-options,
- exchange them so that the options come first. */
-
- if (first_nonopt != last_nonopt
- && last_nonopt != optind)
- exchange((char **)argv);
- else if (last_nonopt != optind)
- first_nonopt = optind;
-
- /* Skip any additional non-options
- and extend the range of non-options previously
- skipped. */
-
- while (optind < argc && NONOPTION_P)
- optind++;
- last_nonopt = optind;
- }
-
- /* The special ARGV-element `--' means premature end of options.
- Skip it like a null option,
- then exchange with previous non-options as if it were an
- option,
- then skip everything else like a non-option. */
-
- if (optind != argc && !strcmp(argv[optind], "--")) {
- optind++;
-
- if (first_nonopt != last_nonopt
- && last_nonopt != optind)
- exchange((char **)argv);
- else if (first_nonopt == last_nonopt)
- first_nonopt = optind;
- last_nonopt = argc;
-
- optind = argc;
- }
-
- /* If we have done all the ARGV-elements, stop the scan
- and back over any non-options that we skipped and permuted.
- */
-
- if (optind == argc) {
- /* Set the next-arg-index to point at the non-options
- that we previously skipped, so the caller will digest
- them. */
- if (first_nonopt != last_nonopt)
- optind = first_nonopt;
- return -1;
- }
-
- /* If we have come to a non-option and did not permute it,
- either stop the scan or describe it to the caller and pass it
- by. */
-
- if (NONOPTION_P) {
- if (ordering == REQUIRE_ORDER)
- return -1;
- optarg = argv[optind++];
- return 1;
- }
-
- /* We have found another option-ARGV-element.
- Skip the initial punctuation. */
-
- nextchar = (argv[optind] + 1
- + (longopts != NULL && argv[optind][1] == '-'));
- }
-
- /* Decode the current option-ARGV-element. */
-
- /* Check whether the ARGV-element is a long option.
-
- If long_only and the ARGV-element has the form "-f", where f is
- a valid short option, don't consider it an abbreviated form of
- a long option that starts with f. Otherwise there would be no
- way to give the -f short option.
-
- On the other hand, if there's a long option "fubar" and
- the ARGV-element is "-fu", do consider that an abbreviation of
- the long option, just like "--fu", and not "-f" with arg "u".
-
- This distinction seems to be the most useful approach. */
-
- if (longopts != NULL
- && (argv[optind][1] == '-'
- || (long_only && (argv[optind][2]
- || !my_index(optstring, argv[optind][1]))))) {
- char *nameend;
- const struct option *p;
- const struct option *pfound = NULL;
- int exact = 0;
- int ambig = 0;
- int indfound = -1;
- int option_index;
-
- for (nameend = nextchar; *nameend && *nameend != '='; nameend++)
- /* Do nothing. */;
-
- /* Test all long options for either exact match
- or abbreviated matches. */
- for (p = longopts, option_index = 0; p->name;
- p++, option_index++)
- if (!strncmp(p->name, nextchar, nameend - nextchar)) {
- if ((unsigned int)(nameend - nextchar)
- == (unsigned int)strlen(p->name)) {
- /* Exact match found. */
- pfound = p;
- indfound = option_index;
- exact = 1;
- break;
- } else if (pfound == NULL) {
- /* First nonexact match found. */
- pfound = p;
- indfound = option_index;
- } else
- /* Second or later nonexact match found.
- */
- ambig = 1;
- }
-
- if (ambig && !exact) {
- if (opterr)
- fprintf(stderr,
- _("%s: option `%s' is ambiguous\n"),
- argv[0], argv[optind]);
- nextchar += strlen(nextchar);
- optind++;
- optopt = 0;
- return '?';
- }
-
- if (pfound != NULL) {
- option_index = indfound;
- optind++;
- if (*nameend) {
- /* Don't test has_arg with >, because some C
- compilers don't
- allow it to be used on enums. */
- if (pfound->has_arg)
- optarg = nameend + 1;
- else {
- if (opterr) {
- if (argv[optind - 1][1] == '-')
- /* --option */
- fprintf(stderr,
- _("%s: option `--%s' doesn't allow an argument\n"),
- argv[0],
- pfound->name);
- else
- /* +option or -option */
- fprintf(stderr,
- _("%s: option `%c%s' doesn't allow an argument\n"),
- argv[0],
- argv[optind - 1]
- [0],
- pfound->name);
- }
-
- nextchar += strlen(nextchar);
-
- optopt = pfound->val;
- return '?';
- }
- } else if (pfound->has_arg == 1) {
- if (optind < argc)
- optarg = argv[optind++];
- else {
- if (opterr)
- fprintf(stderr,
- _("%s: option `%s' requires an argument\n"),
- argv[0],
- argv[optind - 1]);
- nextchar += strlen(nextchar);
- optopt = pfound->val;
- return optstring[0] == ':' ? ':' : '?';
- }
- }
- nextchar += strlen(nextchar);
- if (longind != NULL)
- *longind = option_index;
- if (pfound->flag) {
- *(pfound->flag) = pfound->val;
- return 0;
- }
- return pfound->val;
- }
-
- /* Can't find it as a long option. If this is not
- getopt_long_only,
- or the option starts with '--' or is not a valid short
- option, then it's an error.
- Otherwise interpret it as a short option. */
- if (!long_only || argv[optind][1] == '-'
- || my_index(optstring, *nextchar) == NULL) {
- if (opterr) {
- if (argv[optind][1] == '-')
- /* --option */
- fprintf(stderr,
- _("%s: unrecognized option `--%s'\n"),
- argv[0], nextchar);
- else
- /* +option or -option */
- fprintf(stderr,
- _("%s: unrecognized option `%c%s'\n"),
- argv[0], argv[optind][0],
- nextchar);
- }
- nextchar = (char *)"";
- optind++;
- optopt = 0;
- return '?';
- }
- }
-
- /* Look at and handle the next short option-character. */
-
- {
- char c = *nextchar++;
- char *temp = my_index(optstring, c);
-
- /* Increment `optind' when we start to process its last
- * character. */
- if (*nextchar == '\0')
- ++optind;
-
- if (temp == NULL || c == ':') {
- if (opterr) {
- if (posixly_correct)
- /* 1003.2 specifies the format of this
- * message. */
- fprintf(stderr,
- _("%s: illegal option -- %c\n"),
- argv[0], c);
- else
- fprintf(stderr,
- _("%s: invalid option -- %c\n"),
- argv[0], c);
- }
- optopt = c;
- return '?';
- }
- /* Convenience. Treat POSIX -W foo same as long option --foo */
- if (temp[0] == 'W' && temp[1] == ';') {
- char *nameend;
- const struct option *p;
- const struct option *pfound = NULL;
- int exact = 0;
- int ambig = 0;
- int indfound = 0;
- int option_index;
-
- /* This is an option that requires an argument. */
- if (*nextchar != '\0') {
- optarg = nextchar;
- /* If we end this ARGV-element by taking the
- rest as an arg,
- we must advance to the next element now. */
- optind++;
- } else if (optind == argc) {
- if (opterr) {
- /* 1003.2 specifies the format of this
- * message. */
- fprintf(stderr,
- _("%s: option requires an argument -- %c\n"),
- argv[0], c);
- }
- optopt = c;
- if (optstring[0] == ':')
- c = ':';
- else
- c = '?';
- return c;
- } else
- /* We already incremented `optind' once;
- increment it again when taking next ARGV-elt
- as argument. */
- optarg = argv[optind++];
-
- /* optarg is now the argument, see if it's in the
- table of longopts. */
-
- for (nextchar = nameend = optarg;
- *nameend && *nameend != '='; nameend++)
- /* Do nothing. */;
-
- /* Test all long options for either exact match
- or abbreviated matches. */
- for (p = longopts, option_index = 0; p->name;
- p++, option_index++)
- if (!strncmp(p->name, nextchar,
- nameend - nextchar)) {
- if ((unsigned int)(nameend - nextchar)
- == strlen(p->name)) {
- /* Exact match found. */
- pfound = p;
- indfound = option_index;
- exact = 1;
- break;
- } else if (pfound == NULL) {
- /* First nonexact match found.
- */
- pfound = p;
- indfound = option_index;
- } else
- /* Second or later nonexact
- * match found. */
- ambig = 1;
- }
- if (ambig && !exact) {
- if (opterr)
- fprintf(stderr,
- _("%s: option `-W %s' is ambiguous\n"),
- argv[0], argv[optind]);
- nextchar += strlen(nextchar);
- optind++;
- return '?';
- }
- if (pfound != NULL) {
- option_index = indfound;
- if (*nameend) {
- /* Don't test has_arg with >, because
- some C compilers don't
- allow it to be used on enums. */
- if (pfound->has_arg)
- optarg = nameend + 1;
- else {
- if (opterr)
- fprintf(stderr, _("\
-%s: option `-W %s' doesn't allow an argument\n"),
- argv[0],
- pfound->name);
-
- nextchar += strlen(nextchar);
- return '?';
- }
- } else if (pfound->has_arg == 1) {
- if (optind < argc)
- optarg = argv[optind++];
- else {
- if (opterr)
- fprintf(stderr,
- _("%s: option `%s' requires an argument\n"),
- argv[0],
- argv[optind
- - 1]);
- nextchar += strlen(nextchar);
- return optstring[0] == ':'
- ? ':'
- : '?';
- }
- }
- nextchar += strlen(nextchar);
- if (longind != NULL)
- *longind = option_index;
- if (pfound->flag) {
- *(pfound->flag) = pfound->val;
- return 0;
- }
- return pfound->val;
- }
- nextchar = NULL;
- return 'W'; /* Let the application handle it. */
- }
- if (temp[1] == ':') {
- if (temp[2] == ':') {
- /* This is an option that accepts an argument
- * optionally. */
- if (*nextchar != '\0') {
- optarg = nextchar;
- optind++;
- } else
- optarg = NULL;
- nextchar = NULL;
- } else {
- /* This is an option that requires an argument.
- */
- if (*nextchar != '\0') {
- optarg = nextchar;
- /* If we end this ARGV-element by taking
- the rest as an arg,
- we must advance to the next element
- now. */
- optind++;
- } else if (optind == argc) {
- if (opterr) {
- /* 1003.2 specifies the format
- * of this message. */
- fprintf(stderr,
- _("%s: option requires an argument -- %c\n"),
- argv[0], c);
- }
- optopt = c;
- if (optstring[0] == ':')
- c = ':';
- else
- c = '?';
- } else
- /* We already incremented `optind' once;
- increment it again when taking next
- ARGV-elt as argument. */
- optarg = argv[optind++];
- nextchar = NULL;
- }
- }
- return c;
- }
-}
-
-#ifdef REALLY_NEED_PLAIN_GETOPT
-
-int getopt(argc, argv, optstring) int argc;
-char *const *argv;
-const char *optstring;
-{
- return _getopt_internal(argc, argv, optstring, (const struct option *)0,
- (int *)0, 0);
-}
-
-#endif /* REALLY_NEED_PLAIN_GETOPT */
-
-#endif /* Not ELIDE_CODE. */
-
-#ifdef TEST
-
-/* Compile with -DTEST to make an executable for use in testing
- the above definition of `getopt'. */
-
-int main(argc, argv) int argc;
-char **argv;
-{
- int c;
- int digit_optind = 0;
-
- while (1) {
- int this_option_optind = optind ? optind : 1;
-
- c = getopt(argc, argv, "abc:d:0123456789");
- if (c == -1)
- break;
-
- switch (c) {
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- if (digit_optind != 0
- && digit_optind != this_option_optind)
- printf("digits occur in two different argv-elements.\n");
- digit_optind = this_option_optind;
- printf("option %c\n", c);
- break;
-
- case 'a':
- printf("option a\n");
- break;
-
- case 'b':
- printf("option b\n");
- break;
-
- case 'c':
- printf("option c with value `%s'\n", optarg);
- break;
-
- case '?':
- break;
-
- default:
- printf("?? getopt returned character code 0%o ??\n", c);
- }
- }
-
- if (optind < argc) {
- printf("non-option ARGV-elements: ");
- while (optind < argc)
- printf("%s ", argv[optind++]);
- printf("\n");
- }
-
- exit(0);
-}
-
-#endif /* TEST */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Declarations for getopt.
- * Copyright (C) 1989,90,91,92,93,94,96,97 Free Software Foundation, Inc.
- *
- * NOTE: The canonical source of this file is maintained with the GNU C Library.
- * Bugs can be reported to bug-glibc@gnu.org.
- */
-
-#ifndef _GETOPT_H
-#define _GETOPT_H 1
-
-/*
- * The operating system may or may not provide getopt_long(), and if
- * so it may or may not be a version we are willing to use. Our
- * strategy is to declare getopt here, and then provide code unless
- * the supplied version is adequate. The difficult case is when a
- * declaration for getopt is provided, as our declaration must match.
- *
- * XXX Arguably this version should be named differently, and the
- * local names defined to refer to the system version when we choose
- * to use the system version.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* For communication from `getopt' to the caller.
- When `getopt' finds an option that takes an argument,
- the argument value is returned here.
- Also, when `ordering' is RETURN_IN_ORDER,
- each non-option ARGV-element is returned here. */
-
-extern char *optarg;
-
-/* Index in ARGV of the next element to be scanned.
- This is used for communication to and from the caller
- and for communication between successive calls to `getopt'.
-
- On entry to `getopt', zero means this is the first call; initialize.
-
- When `getopt' returns -1, this is the index of the first of the
- non-option elements that the caller should itself scan.
-
- Otherwise, `optind' communicates from one call to the next
- how much of ARGV has been scanned so far. */
-
-extern int optind;
-
-/* Callers store zero here to inhibit the error message `getopt' prints
- for unrecognized options. */
-
-extern int opterr;
-
-/* Set to an option character which was unrecognized. */
-
-extern int optopt;
-
-/* Describe the long-named options requested by the application.
- The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector
- of `struct option' terminated by an element containing a name which is
- zero.
-
- The field `has_arg' is:
- no_argument (or 0) if the option does not take an argument,
- required_argument (or 1) if the option requires an argument,
- optional_argument (or 2) if the option takes an optional argument.
-
- If the field `flag' is not NULL, it points to a variable that is set
- to the value given in the field `val' when the option is found, but
- left unchanged if the option is not found.
-
- To have a long-named option do something other than set an `int' to
- a compiled-in constant, such as set a value from `optarg', set the
- option's `flag' field to zero and its `val' field to a nonzero
- value (the equivalent single-letter option character, if there is
- one). For long options that have a zero `flag' field, `getopt'
- returns the contents of the `val' field. */
-
-struct option {
-#if defined(__STDC__) && __STDC__
- const char *name;
-#else
- char *name;
-#endif
- /* has_arg can't be an enum because some compilers complain about
- type mismatches in all the code that assumes it is an int. */
- int has_arg;
- int *flag;
- int val;
-};
-
-/* Names for the values of the `has_arg' field of `struct option'. */
-
-#define no_argument 0
-#define required_argument 1
-#define optional_argument 2
-
-#if defined(__STDC__) && __STDC__
-
-#ifdef REALLY_NEED_PLAIN_GETOPT
-
-/*
- * getopt is defined in POSIX.2. Assume that if the system defines
- * getopt that it complies with POSIX.2. If not, an autoconf test
- * should be written to define NONPOSIX_GETOPT_DEFINITION.
- */
-#ifndef NONPOSIX_GETOPT_DEFINITION
-extern int getopt(int argc, char *const *argv, const char *shortopts);
-#else /* NONPOSIX_GETOPT_DEFINITION */
-extern int getopt(void);
-#endif /* NONPOSIX_GETOPT_DEFINITION */
-
-#endif
-
-
-extern int getopt_long(int argc, char *const *argv, const char *shortopts,
- const struct option *longopts, int *longind);
-extern int getopt_long_only(int argc, char *const *argv, const char *shortopts,
- const struct option *longopts, int *longind);
-
-/* Internal only. Users should not call this directly. */
-extern int _getopt_internal(int argc, char *const *argv, const char *shortopts,
- const struct option *longopts, int *longind,
- int long_only);
-#else /* not __STDC__ */
-
-#ifdef REALLY_NEED_PLAIN_GETOPT
-extern int getopt();
-#endif /* REALLY_NEED_PLAIN_GETOPT */
-
-extern int getopt_long();
-extern int getopt_long_only();
-
-extern int _getopt_internal();
-
-#endif /* __STDC__ */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* getopt.h */
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* getopt_long and getopt_long_only entry points for GNU getopt.
- * Copyright (C) 1987,88,89,90,91,92,93,94,96,97,98
- * Free Software Foundation, Inc.
- *
- * NOTE: The canonical source of this file is maintained with the GNU C Library.
- * Bugs can be reported to bug-glibc@gnu.org.
- */
-
-#include <zebra.h>
-#include "getopt.h"
-
-#if !defined __STDC__ || !__STDC__
-/* This is a separate conditional since some stdc systems
- reject `defined (const)'. */
-#ifndef const
-#define const
-#endif
-#endif
-
-#include <stdio.h>
-
-/* Comment out all this code if we are using the GNU C Library, and are not
- actually compiling the library itself. This code is part of the GNU C
- Library, but also included in many other GNU distributions. Compiling
- and linking in this code is a waste when using the GNU C library
- (especially if it is a shared library). Rather than having every GNU
- program understand `configure --with-gnu-libc' and omit the object files,
- it is simpler to just do this in the source for each such file. */
-
-#define GETOPT_INTERFACE_VERSION 2
-#if !defined _LIBC && defined __GLIBC__ && __GLIBC__ >= 2
-#include <gnu-versions.h>
-#if _GNU_GETOPT_INTERFACE_VERSION == GETOPT_INTERFACE_VERSION
-#define ELIDE_CODE
-#endif
-#endif
-
-#ifndef ELIDE_CODE
-
-
-/* This needs to come after some library #include
- to get __GNU_LIBRARY__ defined. */
-#ifdef __GNU_LIBRARY__
-#include <stdlib.h>
-#endif
-
-#ifndef NULL
-#define NULL 0
-#endif
-
-int getopt_long(argc, argv, options, long_options, opt_index) int argc;
-char *const *argv;
-const char *options;
-const struct option *long_options;
-int *opt_index;
-{
- return _getopt_internal(argc, argv, options, long_options, opt_index,
- 0);
-}
-
-/* Like getopt_long, but '-' as well as '--' can indicate a long option.
- If an option that starts with '-' (not '--') doesn't match a long option,
- but does match a short option, it is parsed as a short option
- instead. */
-
-int getopt_long_only(argc, argv, options, long_options, opt_index) int argc;
-char *const *argv;
-const char *options;
-const struct option *long_options;
-int *opt_index;
-{
- return _getopt_internal(argc, argv, options, long_options, opt_index,
- 1);
-}
-
-
-#endif /* Not ELIDE_CODE. */
-
-#ifdef TEST
-
-#include <stdio.h>
-
-int main(argc, argv) int argc;
-char **argv;
-{
- int c;
- int digit_optind = 0;
-
- while (1) {
- int this_option_optind = optind ? optind : 1;
- int option_index = 0;
- static struct option long_options[] = {
- {"add", 1, 0, 0}, {"append", 0, 0, 0},
- {"delete", 1, 0, 0}, {"verbose", 0, 0, 0},
- {"create", 0, 0, 0}, {"file", 1, 0, 0},
- {0, 0, 0, 0}};
-
- c = getopt_long(argc, argv, "abc:d:0123456789", long_options,
- &option_index);
- if (c == -1)
- break;
-
- switch (c) {
- case 0:
- printf("option %s", long_options[option_index].name);
- if (optarg)
- printf(" with arg %s", optarg);
- printf("\n");
- break;
-
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- if (digit_optind != 0
- && digit_optind != this_option_optind)
- printf("digits occur in two different argv-elements.\n");
- digit_optind = this_option_optind;
- printf("option %c\n", c);
- break;
-
- case 'a':
- printf("option a\n");
- break;
-
- case 'b':
- printf("option b\n");
- break;
-
- case 'c':
- printf("option c with value `%s'\n", optarg);
- break;
-
- case 'd':
- printf("option d with value `%s'\n", optarg);
- break;
-
- case '?':
- break;
-
- default:
- printf("?? getopt returned character code 0%o ??\n", c);
- }
- }
-
- if (optind < argc) {
- printf("non-option ARGV-elements: ");
- while (optind < argc)
- printf("%s ", argv[optind++]);
- printf("\n");
- }
-
- exit(0);
-}
-
-#endif /* TEST */
exit(0);
}
-struct thread_master *master;
+struct event_loop *master;
int main(int argc, char **argv)
{
- struct thread thread;
+ struct event event;
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
zlog_aux_init("NONE: ", LOG_DEBUG);
vty_stdio(vty_do_exit);
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &event))
+ event_call(&event);
/* Not reached. */
exit(0);
hash->stats.empty = hash->size;
}
+void hash_clean_and_free(struct hash **hash, void (*free_func)(void *))
+{
+ if (!*hash)
+ return;
+
+ hash_clean(*hash, free_func);
+ hash_free(*hash);
+ *hash = NULL;
+}
+
static void hash_to_list_iter(struct hash_bucket *hb, void *arg)
{
struct list *list = arg;
*/
extern void hash_clean(struct hash *hash, void (*free_func)(void *));
+/*
+ * Remove all elements from a hash table and free the table,
+ * setting the pointer to NULL.
+ *
+ * hash
+ * hash table to operate on
+ * free_func
+ * function to call with each removed item, intended to free the data
+ */
+extern void hash_clean_and_free(struct hash **hash, void (*free_func)(void *));
+
/*
* Delete a hash table.
*
void if_rmap_ctx_delete(struct if_rmap_ctx *ctx)
{
listnode_delete(if_rmap_ctx_list, ctx);
- hash_clean(ctx->ifrmaphash, (void (*)(void *))if_rmap_free);
+ hash_clean_and_free(&ctx->ifrmaphash, (void (*)(void *))if_rmap_free);
if (ctx->name)
XFREE(MTYPE_IF_RMAP_CTX_NAME, ctx);
XFREE(MTYPE_IF_RMAP_CTX, ctx);
#include "memory.h"
#include "prefix.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "zclient.h"
#include "table.h"
* update state
*/
if (ldp_sync_info && ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED) {
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
if (ldp_sync_info->state == LDP_IGP_SYNC_STATE_REQUIRED_UP)
ldp_sync_info->state =
uint8_t enabled; /* enabled */
uint8_t state; /* running state */
uint16_t holddown; /* timer value */
- struct thread *t_holddown; /* holddown timer*/
+ struct event *t_holddown; /* holddown timer*/
uint32_t metric[2]; /* isis interface metric */
};
ret = nb_cli_pending_commit_check(vty);
zlog_info("Configuration Read in Took: %s", readin_time_str);
+ zlog_debug("%s: VTY:%p, pending SET-CFG: %u", __func__, vty,
+ (uint32_t)vty->mgmt_num_pending_setcfg);
+
+ /*
+	 * If (and only if) we have previously sent CLI config commands to
+	 * the MGMTd FE interface using vty_mgmt_send_config_data() without
+	 * an implicit commit, we now need to send an explicit COMMIT-REQ
+	 * to apply all those commands at once.
+ */
+ if (vty->mgmt_num_pending_setcfg && vty_mgmt_fe_enabled())
+ vty_mgmt_send_commit_config(vty, false, false);
if (callback.end_config)
(*callback.end_config)();
#include "frrscript.h"
#include "systemd.h"
-DEFINE_HOOK(frr_early_init, (struct thread_master * tm), (tm));
-DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm));
-DEFINE_HOOK(frr_config_pre, (struct thread_master * tm), (tm));
-DEFINE_HOOK(frr_config_post, (struct thread_master * tm), (tm));
+DEFINE_HOOK(frr_early_init, (struct event_loop * tm), (tm));
+DEFINE_HOOK(frr_late_init, (struct event_loop * tm), (tm));
+DEFINE_HOOK(frr_config_pre, (struct event_loop * tm), (tm));
+DEFINE_HOOK(frr_config_post, (struct event_loop * tm), (tm));
DEFINE_KOOH(frr_early_fini, (), ());
DEFINE_KOOH(frr_fini, (), ());
fprintf(stderr, "%s: %s\n", prefix, errstr);
}
-static struct thread_master *master;
-struct thread_master *frr_init(void)
+static struct event_loop *master;
+struct event_loop *frr_init(void)
{
struct option_chain *oc;
struct log_arg *log_arg;
zprivs_init(di->privs);
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
signal_init(master, di->n_signals, di->signals);
hook_call(frr_early_init, master);
* to read the config in after thread execution starts, so that
* we can match this behavior.
*/
-static void frr_config_read_in(struct thread *t)
+static void frr_config_read_in(struct event *t)
{
hook_call(frr_config_pre, master);
exit(0);
}
- thread_add_event(master, frr_config_read_in, NULL, 0,
- &di->read_in);
+ event_add_event(master, frr_config_read_in, NULL, 0,
+ &di->read_in);
}
if (di->daemon_mode || di->terminal)
}
}
-static struct thread *daemon_ctl_thread = NULL;
+static struct event *daemon_ctl_thread = NULL;
-static void frr_daemon_ctl(struct thread *t)
+static void frr_daemon_ctl(struct event *t)
{
char buf[1];
ssize_t nr;
}
out:
- thread_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
- &daemon_ctl_thread);
+ event_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
+ &daemon_ctl_thread);
}
void frr_detach(void)
frr_check_detach();
}
-void frr_run(struct thread_master *master)
+void frr_run(struct event_loop *master)
{
char instanceinfo[64] = "";
vty_stdio(frr_terminal_close);
if (daemon_ctl_sock != -1) {
set_nonblocking(daemon_ctl_sock);
- thread_add_read(master, frr_daemon_ctl, NULL,
- daemon_ctl_sock, &daemon_ctl_thread);
+ event_add_read(master, frr_daemon_ctl, NULL,
+ daemon_ctl_sock, &daemon_ctl_thread);
}
} else if (di->daemon_mode) {
int nullfd = open("/dev/null", O_RDONLY | O_NOCTTY);
/* end fixed stderr startup logging */
zlog_startup_end();
- struct thread thread;
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ struct event thread;
+ while (event_fetch(master, &thread))
+ event_call(&thread);
}
void frr_early_fini(void)
frr_pthread_finish();
zprivs_terminate(di->privs);
/* signal_init -> nothing needed */
- thread_master_free(master);
+ event_master_free(master);
master = NULL;
zlog_tls_buffer_fini();
zlog_fini();
#include "typesafe.h"
#include "sigevent.h"
#include "privs.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "getopt.h"
#include "module.h"
bool terminal;
enum frr_cli_mode cli_mode;
- struct thread *read_in;
+ struct event *read_in;
const char *config_file;
const char *backup_config_file;
const char *pid_file;
extern __attribute__((__noreturn__)) void frr_help_exit(int status);
-extern struct thread_master *frr_init(void);
+extern struct event_loop *frr_init(void);
extern const char *frr_get_progname(void);
extern enum frr_cli_mode frr_get_cli_mode(void);
extern uint32_t frr_get_fd_limit(void);
extern bool frr_is_startup_fd(int fd);
/* call order of these hooks is as ordered here */
-DECLARE_HOOK(frr_early_init, (struct thread_master * tm), (tm));
-DECLARE_HOOK(frr_late_init, (struct thread_master * tm), (tm));
+DECLARE_HOOK(frr_early_init, (struct event_loop * tm), (tm));
+DECLARE_HOOK(frr_late_init, (struct event_loop * tm), (tm));
/* fork() happens between late_init and config_pre */
-DECLARE_HOOK(frr_config_pre, (struct thread_master * tm), (tm));
-DECLARE_HOOK(frr_config_post, (struct thread_master * tm), (tm));
+DECLARE_HOOK(frr_config_pre, (struct event_loop * tm), (tm));
+DECLARE_HOOK(frr_config_post, (struct event_loop * tm), (tm));
extern void frr_config_fork(void);
-extern void frr_run(struct thread_master *master);
+extern void frr_run(struct event_loop *master);
extern void frr_detach(void);
extern bool frr_zclient_addr(struct sockaddr_storage *sa, socklen_t *sa_len,
#include <lttng/tracepoint.h>
#include "hash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "table.h"
TRACEPOINT_LOGLEVEL(frr_libfrr, hash_release, TRACE_INFO)
#define THREAD_SCHEDULE_ARGS \
- TP_ARGS(struct thread_master *, master, const char *, funcname, \
- const char *, schedfrom, int, fromln, struct thread **, \
+ TP_ARGS(struct event_loop *, master, const char *, funcname, \
+ const char *, schedfrom, int, fromln, struct event **, \
thread_ptr, int, fd, int, val, void *, arg, long, time)
TRACEPOINT_EVENT_CLASS(
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_event)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_read)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_write)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel_async)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_call)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel_async)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_call)
TRACEPOINT_EVENT(
frr_libfrr,
fb.pos = buf;
- struct thread *tc;
+ struct event *tc;
tc = pthread_getspecific(thread_current);
if (!tc)
void zlog_thread_info(int log_level)
{
- struct thread *tc;
+ struct event *tc;
tc = pthread_getspecific(thread_current);
if (tc)
--- /dev/null
+// SPDX-License-Identifier: ISC
+//
+// mgmt.proto
+//
+// @copyright Copyright (C) 2021 Vmware, Inc.
+//
+// @author Pushpasis Sarkar <spushpasis@vmware.com>
+//
+
+syntax = "proto2";
+
+//
+// Protobuf definitions pertaining to the MGMTD component.
+//
+
+package mgmtd;
+
+//
+// Common Sub-Messages
+//
+
+message YangDataXPath {
+ required string xpath = 1;
+}
+
+message YangDataValue {
+ oneof value {
+ //
+ // NOTE: For now let's use stringized value ONLY.
+ // We will enhance it later to pass native-format
+ // if needed.
+ //
+ // bool bool_val = 2;
+ // double double_val = 3;
+ // float float_val = 4;
+ // string string_val = 5;
+ // bytes bytes_val = 6;
+ // int32 int32_val = 7;
+ // int64 int64_val = 8;
+ // uint32 uint32_val = 9;
+ // uint64 uint64_val = 10;
+ // int32 int8_val = 11;
+ // uint32 uint8_val = 12;
+ // int32 int16_val = 13;
+ // uint32 uint16_val = 14;
+ string encoded_str_val = 100;
+ }
+}
+
+message YangData {
+ required string xpath = 1;
+ optional YangDataValue value = 2;
+}
+
+enum CfgDataReqType {
+ REQ_TYPE_NONE = 0;
+ SET_DATA = 1;
+ DELETE_DATA = 2;
+}
+
+message YangCfgDataReq {
+ required YangData data = 1;
+ required CfgDataReqType req_type = 2;
+}
+
+message YangGetDataReq {
+ required YangData data = 1;
+ required int64 next_indx = 2;
+}
+
+//
+// Backend Interface Messages
+//
+message BeSubscribeReq {
+ required string client_name = 1;
+ required bool subscribe_xpaths = 2;
+ repeated string xpath_reg = 3;
+}
+
+message BeSubscribeReply {
+ required bool success = 1;
+}
+
+message BeTxnReq {
+ required uint64 txn_id = 1;
+ required bool create = 2;
+}
+
+message BeTxnReply {
+ required uint64 txn_id = 1;
+ required bool create = 2;
+ required bool success = 3;
+}
+
+message BeCfgDataCreateReq {
+ required uint64 txn_id = 1;
+ required uint64 batch_id = 2;
+ repeated YangCfgDataReq data_req = 3;
+ required bool end_of_data = 4;
+}
+
+message BeCfgDataCreateReply {
+ required uint64 txn_id = 1;
+ required uint64 batch_id = 2;
+ required bool success = 3;
+ optional string error_if_any = 4;
+}
+
+message BeCfgDataApplyReq {
+ required uint64 txn_id = 1;
+}
+
+message BeCfgDataApplyReply {
+ required uint64 txn_id = 1;
+ repeated uint64 batch_ids = 2;
+ required bool success = 3;
+ optional string error_if_any = 4;
+}
+
+message BeOperDataGetReq {
+ required uint64 txn_id = 1;
+ required uint64 batch_id = 2;
+ repeated YangGetDataReq data = 3;
+}
+
+message YangDataReply {
+ repeated YangData data = 1;
+ required int64 next_indx = 2;
+}
+
+message BeOperDataGetReply {
+ required uint64 txn_id = 1;
+ required uint64 batch_id = 2;
+ required bool success = 3;
+ optional string error = 4;
+ optional YangDataReply data = 5;
+}
+
+message BeOperDataNotify {
+ required YangDataReply data = 5;
+}
+
+message BeConfigCmdReq {
+ required string cmd = 1;
+}
+
+message BeConfigCmdReply {
+ required bool success = 1;
+ required string error_if_any = 2;
+}
+
+message BeShowCmdReq {
+ required string cmd = 1;
+}
+
+message BeShowCmdReply {
+ required bool success = 1;
+ required string cmd_ouput = 2;
+}
+
+//
+// Any message on the MGMTD Backend Interface.
+//
+message BeMessage {
+ oneof message {
+ BeSubscribeReq subscr_req = 2;
+ BeSubscribeReply subscr_reply = 3;
+ BeTxnReq txn_req = 4;
+ BeTxnReply txn_reply = 5;
+ BeCfgDataCreateReq cfg_data_req = 6;
+ BeCfgDataCreateReply cfg_data_reply = 7;
+ BeCfgDataApplyReq cfg_apply_req = 8;
+ BeCfgDataApplyReply cfg_apply_reply = 9;
+ BeOperDataGetReq get_req = 10;
+ BeOperDataGetReply get_reply = 11;
+ BeOperDataNotify notify_data = 12;
+ BeConfigCmdReq cfg_cmd_req = 13;
+ BeConfigCmdReply cfg_cmd_reply = 14;
+ BeShowCmdReq show_cmd_req = 15;
+ BeShowCmdReply show_cmd_reply = 16;
+ }
+}
+
+
+//
+// Frontend Interface Messages
+//
+
+message FeRegisterReq {
+ required string client_name = 1;
+}
+
+message FeSessionReq {
+ required bool create = 1;
+ oneof id {
+ uint64 client_conn_id = 2; // Applicable for create request only
+ uint64 session_id = 3; // Applicable for delete request only
+ }
+}
+
+message FeSessionReply {
+ required bool create = 1;
+ required bool success = 2;
+ optional uint64 client_conn_id = 3; // Applicable for create request only
+ required uint64 session_id = 4;
+}
+
+enum DatastoreId {
+ DS_NONE = 0;
+ RUNNING_DS = 1;
+ CANDIDATE_DS = 2;
+ OPERATIONAL_DS = 3;
+ STARTUP_DS = 4;
+}
+
+message FeLockDsReq {
+ required uint64 session_id = 1;
+ required uint64 req_id = 2;
+ required DatastoreId ds_id = 3;
+ required bool lock = 4;
+}
+
+message FeLockDsReply {
+ required uint64 session_id = 1;
+ required uint64 req_id = 2;
+ required DatastoreId ds_id = 3;
+ required bool lock = 4;
+ required bool success = 5;
+ optional string error_if_any = 6;
+}
+
+message FeSetConfigReq {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ repeated YangCfgDataReq data = 4;
+ required bool implicit_commit = 5;
+ required DatastoreId commit_ds_id = 6;
+}
+
+message FeSetConfigReply {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ required bool success = 4;
+ optional string error_if_any = 5;
+}
+
+message FeCommitConfigReq {
+ required uint64 session_id = 1;
+ required DatastoreId src_ds_id = 2;
+ required DatastoreId dst_ds_id = 3;
+ required uint64 req_id = 4;
+ required bool validate_only = 5;
+ required bool abort = 6;
+}
+
+message FeCommitConfigReply {
+ required uint64 session_id = 1;
+ required DatastoreId src_ds_id = 2;
+ required DatastoreId dst_ds_id = 3;
+ required uint64 req_id = 4;
+ required bool validate_only = 5;
+ required bool success = 6;
+ required bool abort = 7;
+ optional string error_if_any = 8;
+}
+
+message FeGetConfigReq {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ repeated YangGetDataReq data = 4;
+}
+
+message FeGetConfigReply {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ required bool success = 4;
+ optional string error_if_any = 5;
+ optional YangDataReply data = 6;
+}
+
+message FeGetDataReq {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ repeated YangGetDataReq data = 4;
+}
+
+message FeGetDataReply {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required uint64 req_id = 3;
+ required bool success = 4;
+ optional string error_if_any = 5;
+ optional YangDataReply data = 6;
+}
+
+message FeNotifyDataReq {
+ repeated YangData data = 1;
+}
+
+message FeRegisterNotifyReq {
+ required uint64 session_id = 1;
+ required DatastoreId ds_id = 2;
+ required bool register_req = 3;
+ required uint64 req_id = 4;
+ repeated YangDataXPath data_xpath = 5;
+}
+
+message FeMessage {
+ oneof message {
+ FeRegisterReq register_req = 2;
+ FeSessionReq session_req = 3;
+ FeSessionReply session_reply = 4;
+ FeLockDsReq lockds_req = 5;
+ FeLockDsReply lockds_reply = 6;
+ FeSetConfigReq setcfg_req = 7;
+ FeSetConfigReply setcfg_reply = 8;
+ FeCommitConfigReq commcfg_req = 9;
+ FeCommitConfigReply commcfg_reply = 10;
+ FeGetConfigReq getcfg_req = 11;
+ FeGetConfigReply getcfg_reply = 12;
+ FeGetDataReq getdata_req = 13;
+ FeGetDataReply getdata_reply = 14;
+ FeNotifyDataReq notify_data_req = 15;
+ FeRegisterNotifyReq regnotify_req = 16;
+ }
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_CLIENT_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_CLIENT_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_CLIENT_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_be_client) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_BE_CLIENT_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH,
+ "MGMTD backend transaction batch data");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "MGMTD backend transaction data");
+
+enum mgmt_be_txn_event {
+ MGMTD_BE_TXN_PROC_SETCFG = 1,
+ MGMTD_BE_TXN_PROC_GETCFG,
+ MGMTD_BE_TXN_PROC_GETDATA
+};
+
+struct mgmt_be_set_cfg_req {
+ struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ uint16_t num_cfg_changes;
+};
+
+struct mgmt_be_get_data_req {
+ char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+ uint16_t num_xpaths;
+};
+
+struct mgmt_be_txn_req {
+ enum mgmt_be_txn_event event;
+ union {
+ struct mgmt_be_set_cfg_req set_cfg;
+ struct mgmt_be_get_data_req get_data;
+ } req;
+};
+
+PREDECL_LIST(mgmt_be_batches);
+struct mgmt_be_batch_ctx {
+ /* Batch-Id as assigned by MGMTD */
+ uint64_t batch_id;
+
+ struct mgmt_be_txn_req txn_req;
+
+ uint32_t flags;
+
+ struct mgmt_be_batches_item list_linkage;
+};
+#define MGMTD_BE_BATCH_FLAGS_CFG_PREPARED (1U << 0)
+#define MGMTD_BE_TXN_FLAGS_CFG_APPLIED (1U << 1)
+DECLARE_LIST(mgmt_be_batches, struct mgmt_be_batch_ctx, list_linkage);
+
+struct mgmt_be_client_ctx;
+
+PREDECL_LIST(mgmt_be_txns);
+struct mgmt_be_txn_ctx {
+ /* Txn-Id as assigned by MGMTD */
+ uint64_t txn_id;
+ uint32_t flags;
+
+ struct mgmt_be_client_txn_ctx client_data;
+ struct mgmt_be_client_ctx *client_ctx;
+
+ /* List of batches belonging to this transaction */
+ struct mgmt_be_batches_head cfg_batches;
+ struct mgmt_be_batches_head apply_cfgs;
+
+ struct mgmt_be_txns_item list_linkage;
+
+ struct nb_transaction *nb_txn;
+ uint32_t nb_txn_id;
+};
+#define MGMTD_BE_TXN_FLAGS_CFGPREP_FAILED (1U << 1)
+
+DECLARE_LIST(mgmt_be_txns, struct mgmt_be_txn_ctx, list_linkage);
+
+#define FOREACH_BE_TXN_BATCH_IN_LIST(txn, batch) \
+ frr_each_safe (mgmt_be_batches, &(txn)->cfg_batches, (batch))
+
+#define FOREACH_BE_APPLY_BATCH_IN_LIST(txn, batch) \
+ frr_each_safe (mgmt_be_batches, &(txn)->apply_cfgs, (batch))
+
+struct mgmt_be_client_ctx {
+ int conn_fd;
+ struct event_loop *tm;
+ struct event *conn_retry_tmr;
+ struct event *conn_read_ev;
+ struct event *conn_write_ev;
+ struct event *conn_writes_on;
+ struct event *msg_proc_ev;
+ uint32_t flags;
+
+ struct mgmt_msg_state mstate;
+
+ struct nb_config *candidate_config;
+ struct nb_config *running_config;
+
+ unsigned long num_batch_find;
+ unsigned long avg_batch_find_tm;
+ unsigned long num_edit_nb_cfg;
+ unsigned long avg_edit_nb_cfg_tm;
+ unsigned long num_prep_nb_cfg;
+ unsigned long avg_prep_nb_cfg_tm;
+ unsigned long num_apply_nb_cfg;
+ unsigned long avg_apply_nb_cfg_tm;
+
+ struct mgmt_be_txns_head txn_head;
+ struct mgmt_be_client_params client_params;
+};
+
+#define MGMTD_BE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+
+#define FOREACH_BE_TXN_IN_LIST(client_ctx, txn) \
+ frr_each_safe (mgmt_be_txns, &(client_ctx)->txn_head, (txn))
+
+static bool mgmt_debug_be_client;
+
+static struct mgmt_be_client_ctx mgmt_be_client_ctx = {
+ .conn_fd = -1,
+};
+
+const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
+#ifdef HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
+#endif
+ [MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid",
+};
+
+/* Forward declarations */
+static void
+mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
+ enum mgmt_be_event event);
+static void
+mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
+ unsigned long intvl_secs);
+static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
+ Mgmtd__BeMessage *be_msg);
+
+static void
+mgmt_be_server_disconnect(struct mgmt_be_client_ctx *client_ctx,
+ bool reconnect)
+{
+ /* Notify client through registered callback (if any) */
+ if (client_ctx->client_params.client_connect_notify)
+ (void)(*client_ctx->client_params.client_connect_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data, false);
+
+ if (client_ctx->conn_fd != -1) {
+ close(client_ctx->conn_fd);
+ client_ctx->conn_fd = -1;
+ }
+
+ if (reconnect)
+ mgmt_be_client_schedule_conn_retry(
+ client_ctx,
+ client_ctx->client_params.conn_retry_intvl_sec);
+}
+
+static struct mgmt_be_batch_ctx *
+mgmt_be_find_batch_by_id(struct mgmt_be_txn_ctx *txn,
+ uint64_t batch_id)
+{
+ struct mgmt_be_batch_ctx *batch = NULL;
+
+ FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+ if (batch->batch_id == batch_id)
+ return batch;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_be_batch_ctx *
+mgmt_be_batch_create(struct mgmt_be_txn_ctx *txn, uint64_t batch_id)
+{
+ struct mgmt_be_batch_ctx *batch = NULL;
+
+ batch = mgmt_be_find_batch_by_id(txn, batch_id);
+ if (!batch) {
+ batch = XCALLOC(MTYPE_MGMTD_BE_BATCH,
+ sizeof(struct mgmt_be_batch_ctx));
+ assert(batch);
+
+ batch->batch_id = batch_id;
+ mgmt_be_batches_add_tail(&txn->cfg_batches, batch);
+
+ MGMTD_BE_CLIENT_DBG("Added new batch 0x%llx to transaction",
+ (unsigned long long)batch_id);
+ }
+
+ return batch;
+}
+
+static void mgmt_be_batch_delete(struct mgmt_be_txn_ctx *txn,
+ struct mgmt_be_batch_ctx **batch)
+{
+ uint16_t indx;
+
+ if (!batch)
+ return;
+
+ mgmt_be_batches_del(&txn->cfg_batches, *batch);
+ if ((*batch)->txn_req.event == MGMTD_BE_TXN_PROC_SETCFG) {
+ for (indx = 0; indx < MGMTD_MAX_CFG_CHANGES_IN_BATCH; indx++) {
+ if ((*batch)->txn_req.req.set_cfg.cfg_changes[indx]
+ .value) {
+ free((char *)(*batch)
+ ->txn_req.req.set_cfg
+ .cfg_changes[indx]
+ .value);
+ }
+ }
+ }
+
+ XFREE(MTYPE_MGMTD_BE_BATCH, *batch);
+ *batch = NULL;
+}
+
+static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn)
+{
+ struct mgmt_be_batch_ctx *batch = NULL;
+
+ FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+ mgmt_be_batch_delete(txn, &batch);
+ }
+
+ FOREACH_BE_APPLY_BATCH_IN_LIST (txn, batch) {
+ mgmt_be_batch_delete(txn, &batch);
+ }
+}
+
+static struct mgmt_be_txn_ctx *
+mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
+ uint64_t txn_id)
+{
+ struct mgmt_be_txn_ctx *txn = NULL;
+
+ FOREACH_BE_TXN_IN_LIST (client_ctx, txn) {
+ if (txn->txn_id == txn_id)
+ return txn;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_be_txn_ctx *
+mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
+ uint64_t txn_id)
+{
+ struct mgmt_be_txn_ctx *txn = NULL;
+
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+ if (!txn) {
+ txn = XCALLOC(MTYPE_MGMTD_BE_TXN,
+ sizeof(struct mgmt_be_txn_ctx));
+ assert(txn);
+
+ txn->txn_id = txn_id;
+ txn->client_ctx = client_ctx;
+ mgmt_be_batches_init(&txn->cfg_batches);
+ mgmt_be_batches_init(&txn->apply_cfgs);
+ mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
+
+ MGMTD_BE_CLIENT_DBG("Added new transaction 0x%llx",
+ (unsigned long long)txn_id);
+ }
+
+ return txn;
+}
+
+static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
+ struct mgmt_be_txn_ctx **txn)
+{
+ char err_msg[] = "MGMT Transaction Delete";
+
+ if (!txn)
+ return;
+
+ /*
+ * Remove the transaction from the list of transactions
+	 * so that future lookups with the same transaction id
+	 * do not return this one.
+ */
+ mgmt_be_txns_del(&client_ctx->txn_head, *txn);
+
+ /*
+ * Time to delete the transaction which should also
+ * take care of cleaning up all batches created via
+ * CFGDATA_CREATE_REQs. But first notify the client
+ * about the transaction delete.
+ */
+ if (client_ctx->client_params.txn_notify)
+ (void)(*client_ctx->client_params
+ .txn_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ &(*txn)->client_data, true);
+
+ mgmt_be_cleanup_all_batches(*txn);
+ if ((*txn)->nb_txn)
+ nb_candidate_commit_abort((*txn)->nb_txn, err_msg,
+ sizeof(err_msg));
+ XFREE(MTYPE_MGMTD_BE_TXN, *txn);
+
+ *txn = NULL;
+}
+
+static void
+mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
+{
+ struct mgmt_be_txn_ctx *txn = NULL;
+
+ FOREACH_BE_TXN_IN_LIST (client_ctx, txn) {
+ mgmt_be_txn_delete(client_ctx, &txn);
+ }
+}
+
+static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
+ uint64_t txn_id, bool create,
+ bool success)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeTxnReply txn_reply;
+
+ mgmtd__be_txn_reply__init(&txn_reply);
+ txn_reply.create = create;
+ txn_reply.txn_id = txn_id;
+ txn_reply.success = success;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY;
+ be_msg.txn_reply = &txn_reply;
+
+ MGMTD_BE_CLIENT_DBG(
+ "Sending TXN_REPLY message to MGMTD for txn 0x%llx",
+ (unsigned long long)txn_id);
+
+ return mgmt_be_client_send_msg(client_ctx, &be_msg);
+}
+
+static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
+ uint64_t txn_id, bool create)
+{
+ struct mgmt_be_txn_ctx *txn;
+
+ txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+ if (create) {
+ if (txn) {
+ /*
+ * Transaction with same txn-id already exists.
+ * Should not happen under any circumstances.
+ */
+ MGMTD_BE_CLIENT_ERR(
+ "Transaction 0x%llx already exists!!!",
+ (unsigned long long)txn_id);
+ mgmt_be_send_txn_reply(client_ctx, txn_id, create,
+ false);
+ }
+
+ MGMTD_BE_CLIENT_DBG("Created new transaction 0x%llx",
+ (unsigned long long)txn_id);
+ txn = mgmt_be_txn_create(client_ctx, txn_id);
+
+ if (client_ctx->client_params.txn_notify)
+ (void)(*client_ctx->client_params
+ .txn_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ &txn->client_data, false);
+ } else {
+ if (!txn) {
+ /*
+			 * Transaction with the same txn-id does not exist.
+			 * Return success anyway.
+ */
+ MGMTD_BE_CLIENT_DBG(
+ "Transaction to delete 0x%llx does NOT exists!!!",
+ (unsigned long long)txn_id);
+ } else {
+ MGMTD_BE_CLIENT_DBG("Delete transaction 0x%llx",
+ (unsigned long long)txn_id);
+ mgmt_be_txn_delete(client_ctx, &txn);
+ }
+ }
+
+ mgmt_be_send_txn_reply(client_ctx, txn_id, create, true);
+
+ return 0;
+}
+
+/*
+ * Build and send a CFG_DATA_CREATE_REPLY for one (txn, batch) pair,
+ * carrying success/failure and an optional error string.
+ */
+static int
+mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx,
+				  uint64_t txn_id, uint64_t batch_id,
+				  bool success, const char *error_if_any)
+{
+	Mgmtd__BeCfgDataCreateReply reply;
+	Mgmtd__BeMessage msg;
+
+	mgmtd__be_cfg_data_create_reply__init(&reply);
+	reply.txn_id = txn_id;
+	reply.batch_id = batch_id;
+	reply.success = success;
+	if (error_if_any)
+		reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__be_message__init(&msg);
+	msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY;
+	msg.cfg_data_reply = &reply;
+
+	MGMTD_BE_CLIENT_DBG(
+		"Sending CFGDATA_CREATE_REPLY message to MGMTD for txn 0x%llx batch 0x%llx",
+		(unsigned long long)txn_id, (unsigned long long)batch_id);
+
+	return mgmt_be_client_send_msg(client_ctx, &msg);
+}
+
+/*
+ * Abort any prepared-but-unapplied northbound transaction and roll the
+ * candidate configuration back to the running configuration.
+ */
+static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
+{
+	char abort_errmsg[BUFSIZ] = {0};
+
+	assert(txn && txn->client_ctx);
+
+	if (txn->nb_txn) {
+		MGMTD_BE_CLIENT_ERR(
+			"Aborting configurations after prep for Txn 0x%llx",
+			(unsigned long long)txn->txn_id);
+		nb_candidate_commit_abort(txn->nb_txn, abort_errmsg,
+					  sizeof(abort_errmsg));
+		txn->nb_txn = 0;
+	}
+
+	/*
+	 * Revert candidate back to running.
+	 *
+	 * NOTE(review): this is one txn ctx but the candidate_config is per
+	 * client ctx — confirm how concurrent transactions would interact.
+	 */
+	MGMTD_BE_CLIENT_DBG(
+		"Reset candidate configurations after abort of Txn 0x%llx",
+		(unsigned long long)txn->txn_id);
+	nb_config_replace(txn->client_ctx->candidate_config,
+			  txn->client_ctx->running_config, true);
+}
+
+/*
+ * Edit all batched config changes received so far into the client's
+ * candidate configuration, then validate/prepare them in a single
+ * northbound transaction.  A CFG_DATA_CREATE_REPLY is sent for each
+ * batch; successfully prepared batches move from 'cfg_batches' to
+ * 'apply_cfgs'.  On prepare failure the whole candidate is aborted.
+ */
+static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
+{
+	struct mgmt_be_client_ctx *client_ctx;
+	struct mgmt_be_txn_req *txn_req = NULL;
+	struct nb_context nb_ctx = {0};
+	struct timeval edit_nb_cfg_start;
+	struct timeval edit_nb_cfg_end;
+	unsigned long edit_nb_cfg_tm;
+	struct timeval prep_nb_cfg_start;
+	struct timeval prep_nb_cfg_end;
+	unsigned long prep_nb_cfg_tm;
+	struct mgmt_be_batch_ctx *batch;
+	bool error;
+	char err_buf[BUFSIZ];
+	size_t num_processed;
+	bool debug_be = mgmt_debug_be_client;
+	int err;
+
+	assert(txn && txn->client_ctx);
+	client_ctx = txn->client_ctx;
+
+	num_processed = 0;
+	FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+		txn_req = &batch->txn_req;
+		error = false;
+		nb_ctx.client = NB_CLIENT_CLI;
+		nb_ctx.user = (void *)client_ctx->client_params.user_data;
+
+		if (!txn->nb_txn) {
+			/*
+			 * This happens when the current backend client is only
+			 * interested in consuming the config items but is not
+			 * interested in validating it.
+			 */
+			error = false;
+			/* Edit timing is collected only when debugging. */
+			if (debug_be)
+				gettimeofday(&edit_nb_cfg_start, NULL);
+			nb_candidate_edit_config_changes(
+				client_ctx->candidate_config,
+				txn_req->req.set_cfg.cfg_changes,
+				(size_t)txn_req->req.set_cfg.num_cfg_changes,
+				NULL, NULL, 0, err_buf, sizeof(err_buf),
+				&error);
+			if (error) {
+				err_buf[sizeof(err_buf) - 1] = 0;
+				MGMTD_BE_CLIENT_ERR(
+					"Failed to update configs for Txn %llx Batch %llx to Candidate! Err: '%s'",
+					(unsigned long long)txn->txn_id,
+					(unsigned long long)batch->batch_id,
+					err_buf);
+				return -1;
+			}
+			if (debug_be) {
+				gettimeofday(&edit_nb_cfg_end, NULL);
+				edit_nb_cfg_tm = timeval_elapsed(
+					edit_nb_cfg_end, edit_nb_cfg_start);
+				/* Running average over all edits so far. */
+				client_ctx->avg_edit_nb_cfg_tm =
+					((client_ctx->avg_edit_nb_cfg_tm
+					  * client_ctx->num_edit_nb_cfg)
+					 + edit_nb_cfg_tm)
+					/ (client_ctx->num_edit_nb_cfg + 1);
+			}
+			client_ctx->num_edit_nb_cfg++;
+		}
+
+		num_processed++;
+	}
+
+	/* Nothing batched — nothing to prepare. */
+	if (!num_processed)
+		return 0;
+
+	/*
+	 * Now prepare all the batches we have applied in one go.
+	 */
+	nb_ctx.client = NB_CLIENT_CLI;
+	nb_ctx.user = (void *)client_ctx->client_params.user_data;
+	if (debug_be)
+		gettimeofday(&prep_nb_cfg_start, NULL);
+	/* Validation on/off is a compile-time choice (4th argument). */
+	err = nb_candidate_commit_prepare(nb_ctx, client_ctx->candidate_config,
+					  "MGMTD Backend Txn", &txn->nb_txn,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+					  true, true,
+#else
+					  false, true,
+#endif
+					  err_buf, sizeof(err_buf) - 1);
+	if (err != NB_OK) {
+		err_buf[sizeof(err_buf) - 1] = 0;
+		if (err == NB_ERR_VALIDATION)
+			MGMTD_BE_CLIENT_ERR(
+				"Failed to validate configs for Txn %llx %u Batches! Err: '%s'",
+				(unsigned long long)txn->txn_id,
+				(uint32_t)num_processed, err_buf);
+		else
+			MGMTD_BE_CLIENT_ERR(
+				"Failed to prepare configs for Txn %llx, %u Batches! Err: '%s'",
+				(unsigned long long)txn->txn_id,
+				(uint32_t)num_processed, err_buf);
+		error = true;
+		SET_FLAG(txn->flags, MGMTD_BE_TXN_FLAGS_CFGPREP_FAILED);
+	} else
+		/*
+		 * NOTE(review): on success 'error' keeps the value from the
+		 * last loop iteration (false, since failures return early) —
+		 * appears intentional but worth confirming.
+		 */
+		MGMTD_BE_CLIENT_DBG(
+			"Prepared configs for Txn %llx, %u Batches! successfully!",
+			(unsigned long long)txn->txn_id,
+			(uint32_t)num_processed);
+	if (debug_be) {
+		gettimeofday(&prep_nb_cfg_end, NULL);
+		prep_nb_cfg_tm =
+			timeval_elapsed(prep_nb_cfg_end, prep_nb_cfg_start);
+		client_ctx->avg_prep_nb_cfg_tm =
+			((client_ctx->avg_prep_nb_cfg_tm
+			  * client_ctx->num_prep_nb_cfg)
+			 + prep_nb_cfg_tm)
+			/ (client_ctx->num_prep_nb_cfg + 1);
+	}
+	client_ctx->num_prep_nb_cfg++;
+
+	/* ACK/NAK every batch; prepared ones move to the apply list. */
+	FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+		mgmt_be_send_cfgdata_create_reply(
+			client_ctx, txn->txn_id, batch->batch_id,
+			error ? false : true, error ? err_buf : NULL);
+		if (!error) {
+			SET_FLAG(batch->flags,
+				 MGMTD_BE_BATCH_FLAGS_CFG_PREPARED);
+			mgmt_be_batches_del(&txn->cfg_batches, batch);
+			mgmt_be_batches_add_tail(&txn->apply_cfgs, batch);
+		}
+	}
+
+	if (debug_be)
+		MGMTD_BE_CLIENT_DBG(
+			"Avg-nb-edit-duration %lu uSec, nb-prep-duration %lu (avg: %lu) uSec, batch size %u",
+			client_ctx->avg_edit_nb_cfg_tm, prep_nb_cfg_tm,
+			client_ctx->avg_prep_nb_cfg_tm, (uint32_t)num_processed);
+
+	if (error)
+		mgmt_be_txn_cfg_abort(txn);
+
+	return 0;
+}
+
+/*
+ * Record one batch of SETCFG requests from MGMTD as a new batch context
+ * on the transaction, converting each protobuf config request into an
+ * 'nb_cfg_change' for later northbound processing.
+ */
+static int
+mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
+			       struct mgmt_be_txn_ctx *txn,
+			       uint64_t batch_id,
+			       Mgmtd__YangCfgDataReq * cfg_req[],
+			       int num_req)
+{
+	struct mgmt_be_batch_ctx *batch = NULL;
+	struct mgmt_be_txn_req *txn_req = NULL;
+	int index;
+	struct nb_cfg_change *cfg_chg;
+
+	/* NOTE(review): client_ctx is unused here — signature symmetry? */
+	batch = mgmt_be_batch_create(txn, batch_id);
+	if (!batch) {
+		MGMTD_BE_CLIENT_ERR("Batch create failed!");
+		return -1;
+	}
+
+	txn_req = &batch->txn_req;
+	txn_req->event = MGMTD_BE_TXN_PROC_SETCFG;
+	MGMTD_BE_CLIENT_DBG(
+		"Created Set-Config request for batch 0x%llx, txn id 0x%llx, cfg-items:%d",
+		(unsigned long long)batch_id, (unsigned long long)txn->txn_id,
+		num_req);
+
+	txn_req->req.set_cfg.num_cfg_changes = num_req;
+	for (index = 0; index < num_req; index++) {
+		cfg_chg = &txn_req->req.set_cfg.cfg_changes[index];
+
+		/* Only DELETE maps to DESTROY; everything else is CREATE. */
+		if (cfg_req[index]->req_type
+		    == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+			cfg_chg->operation = NB_OP_DESTROY;
+		else
+			cfg_chg->operation = NB_OP_CREATE;
+
+		strlcpy(cfg_chg->xpath, cfg_req[index]->data->xpath,
+			sizeof(cfg_chg->xpath));
+		/* Duplicate the encoded value; cfg_chg owns the copy. */
+		cfg_chg->value = (cfg_req[index]->data->value
+				  && cfg_req[index]
+					     ->data->value
+					     ->encoded_str_val
+				  ? strdup(cfg_req[index]
+						   ->data->value
+						   ->encoded_str_val)
+				  : NULL);
+		/*
+		 * A "<<container>>" placeholder marks a containment node
+		 * with no real value — drop the copied string.
+		 */
+		if (cfg_chg->value
+		    && !strncmp(cfg_chg->value, MGMTD_BE_CONTAINER_NODE_VAL,
+				strlen(MGMTD_BE_CONTAINER_NODE_VAL))) {
+			free((char *)cfg_chg->value);
+			cfg_chg->value = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Handle CFG_DATA_REQ: batch the changes on the transaction and, when
+ * the last chunk arrives, trigger the prepare phase.  Unknown txn-ids
+ * are NAK'd with an error string.
+ */
+static int
+mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
+			    uint64_t txn_id, uint64_t batch_id,
+			    Mgmtd__YangCfgDataReq * cfg_req[], int num_req,
+			    bool end_of_data)
+{
+	struct mgmt_be_txn_ctx *txn;
+
+	txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+	if (!txn) {
+		MGMTD_BE_CLIENT_ERR(
+			"Invalid txn-id 0x%llx provided from MGMTD server",
+			(unsigned long long)txn_id);
+		mgmt_be_send_cfgdata_create_reply(
+			client_ctx, txn_id, batch_id, false,
+			"Transaction context not created yet");
+		return 0;
+	}
+
+	mgmt_be_update_setcfg_in_batch(client_ctx, txn, batch_id, cfg_req,
+				       num_req);
+
+	if (end_of_data) {
+		MGMTD_BE_CLIENT_DBG("Triggering CFG_PREPARE_REQ processing");
+		mgmt_be_txn_cfg_prepare(txn);
+	}
+
+	return 0;
+}
+
+/*
+ * Send a CFG_APPLY_REPLY covering 'num_batch_ids' applied batches of a
+ * transaction, or a failure with an error string.
+ */
+static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
+				    uint64_t txn_id, uint64_t batch_ids[],
+				    size_t num_batch_ids, bool success,
+				    const char *error_if_any)
+{
+	Mgmtd__BeCfgDataApplyReply reply;
+	Mgmtd__BeMessage msg;
+
+	mgmtd__be_cfg_data_apply_reply__init(&reply);
+	reply.txn_id = txn_id;
+	reply.success = success;
+	reply.batch_ids = (uint64_t *)batch_ids;
+	reply.n_batch_ids = num_batch_ids;
+	if (error_if_any)
+		reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__be_message__init(&msg);
+	msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY;
+	msg.cfg_apply_reply = &reply;
+
+	MGMTD_BE_CLIENT_DBG(
+		"Sending CFG_APPLY_REPLY message to MGMTD for txn 0x%llx, %d batches [0x%llx - 0x%llx]",
+		(unsigned long long)txn_id, (int)num_batch_ids,
+		success && num_batch_ids ?
+			(unsigned long long)batch_ids[0] : 0,
+		success && num_batch_ids ?
+			(unsigned long long)batch_ids[num_batch_ids - 1] : 0);
+
+	return mgmt_be_client_send_msg(client_ctx, &msg);
+}
+
+/*
+ * Apply the previously prepared northbound transaction and send
+ * CFG_APPLY_REPLY message(s) back to MGMTD covering every batch,
+ * chunked at MGMTD_BE_MAX_BATCH_IDS_IN_REQ ids per message.
+ */
+static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
+{
+	struct mgmt_be_client_ctx *client_ctx;
+	struct timeval apply_nb_cfg_start;
+	struct timeval apply_nb_cfg_end;
+	unsigned long apply_nb_cfg_tm;
+	struct mgmt_be_batch_ctx *batch;
+	char err_buf[BUFSIZ];
+	size_t num_processed;
+	/*
+	 * NOTE(review): 'static' relies on single-threaded event-loop
+	 * execution — confirm this can never be entered concurrently.
+	 */
+	static uint64_t batch_ids[MGMTD_BE_MAX_BATCH_IDS_IN_REQ];
+	bool debug_be = mgmt_debug_be_client;
+
+	assert(txn && txn->client_ctx);
+	client_ctx = txn->client_ctx;
+
+	/* Apply is only valid after a successful prepare. */
+	assert(txn->nb_txn);
+	num_processed = 0;
+
+	/*
+	 * Now apply all the batches we have applied in one go.
+	 */
+	if (debug_be)
+		gettimeofday(&apply_nb_cfg_start, NULL);
+	/* NOTE(review): the apply result is deliberately ignored here. */
+	(void)nb_candidate_commit_apply(txn->nb_txn, true, &txn->nb_txn_id,
+					err_buf, sizeof(err_buf) - 1);
+	if (debug_be) {
+		gettimeofday(&apply_nb_cfg_end, NULL);
+		apply_nb_cfg_tm =
+			timeval_elapsed(apply_nb_cfg_end, apply_nb_cfg_start);
+		/* Running average over all applies so far. */
+		client_ctx->avg_apply_nb_cfg_tm =
+			((client_ctx->avg_apply_nb_cfg_tm
+			  * client_ctx->num_apply_nb_cfg)
+			 + apply_nb_cfg_tm)
+			/ (client_ctx->num_apply_nb_cfg + 1);
+	}
+	client_ctx->num_apply_nb_cfg++;
+	txn->nb_txn = NULL;
+
+	/*
+	 * Send back CFG_APPLY_REPLY for all batches applied.
+	 */
+	FOREACH_BE_APPLY_BATCH_IN_LIST (txn, batch) {
+		/*
+		 * No need to delete the batch yet. Will be deleted during
+		 * transaction cleanup on receiving TXN_DELETE_REQ.
+		 */
+		/*
+		 * NOTE(review): a TXN_FLAGS_* constant is set on
+		 * batch->flags (cf. MGMTD_BE_BATCH_FLAGS_CFG_PREPARED
+		 * elsewhere) — confirm the intended flag constant.
+		 */
+		SET_FLAG(batch->flags, MGMTD_BE_TXN_FLAGS_CFG_APPLIED);
+		mgmt_be_batches_del(&txn->apply_cfgs, batch);
+		mgmt_be_batches_add_tail(&txn->cfg_batches, batch);
+
+		batch_ids[num_processed] = batch->batch_id;
+		num_processed++;
+		/* Flush a full chunk of batch-ids in one reply. */
+		if (num_processed == MGMTD_BE_MAX_BATCH_IDS_IN_REQ) {
+			mgmt_be_send_apply_reply(client_ctx, txn->txn_id,
+						 batch_ids, num_processed,
+						 true, NULL);
+			num_processed = 0;
+		}
+	}
+
+	/* Flush the remaining (possibly zero) batch-ids. */
+	mgmt_be_send_apply_reply(client_ctx, txn->txn_id, batch_ids,
+				 num_processed, true, NULL);
+
+	/* apply_nb_cfg_tm is only set when debug_be is on (guarded). */
+	if (debug_be)
+		MGMTD_BE_CLIENT_DBG("Nb-apply-duration %lu (avg: %lu) uSec",
+				    apply_nb_cfg_tm,
+				    client_ctx->avg_apply_nb_cfg_tm);
+
+	return 0;
+}
+
+/*
+ * Handle CFG_APPLY_REQ: look up the transaction and kick off the apply
+ * phase; NAK with an error string if the transaction does not exist.
+ */
+static int
+mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
+			  uint64_t txn_id)
+{
+	struct mgmt_be_txn_ctx *txn =
+		mgmt_be_find_txn_by_id(client_ctx, txn_id);
+
+	if (!txn) {
+		mgmt_be_send_apply_reply(client_ctx, txn_id, NULL, 0, false,
+					 "Transaction not created yet!");
+		return -1;
+	}
+
+	MGMTD_BE_CLIENT_DBG("Trigger CFG_APPLY_REQ processing");
+	mgmt_be_txn_proc_cfgapply(txn);
+
+	return 0;
+}
+
+/*
+ * Dispatch one decoded protobuf message from MGMTD to its handler.
+ * Reply-direction and not-yet-implemented message types are ignored.
+ */
+static int
+mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
+			  Mgmtd__BeMessage *be_msg)
+{
+	/*
+	 * protobuf-c adds a max size enum with an internal, and changing by
+	 * version, name; cast to an int to avoid unhandled enum warnings
+	 */
+	switch ((int)be_msg->message_case) {
+	case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+		/* Ack of our SUBSCR_REQ — log only. */
+		MGMTD_BE_CLIENT_DBG("Subscribe Reply Msg from mgmt, status %u",
+				    be_msg->subscr_reply->success);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+		mgmt_be_process_txn_req(client_ctx,
+					be_msg->txn_req->txn_id,
+					be_msg->txn_req->create);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+		mgmt_be_process_cfgdata_req(
+			client_ctx, be_msg->cfg_data_req->txn_id,
+			be_msg->cfg_data_req->batch_id,
+			be_msg->cfg_data_req->data_req,
+			be_msg->cfg_data_req->n_data_req,
+			be_msg->cfg_data_req->end_of_data);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+		mgmt_be_process_cfg_apply(
+			client_ctx, (uint64_t)be_msg->cfg_apply_req->txn_id);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from Backend
+	 * clients to MGMTd only and/or need not be handled here.
+	 */
+	case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
+	case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * mgmt_msg callback: unpack one raw protobuf buffer from the server,
+ * dispatch it, and free the unpacked message.
+ */
+static void mgmt_be_client_process_msg(void *user_ctx, uint8_t *data,
+				       size_t len)
+{
+	struct mgmt_be_client_ctx *client_ctx = user_ctx;
+	Mgmtd__BeMessage *msg;
+
+	msg = mgmtd__be_message__unpack(NULL, len, data);
+	if (!msg) {
+		MGMTD_BE_CLIENT_DBG("Failed to decode %zu bytes from server",
+				    len);
+		return;
+	}
+
+	MGMTD_BE_CLIENT_DBG(
+		"Decoded %zu bytes of message(msg: %u/%u) from server", len,
+		msg->message_case, msg->message_case);
+	(void)mgmt_be_client_handle_msg(client_ctx, msg);
+	mgmtd__be_message__free_unpacked(msg, NULL);
+}
+
+/*
+ * Timer callback: drain buffered messages; reschedule ourselves if
+ * mgmt_msg_procbufs() reports more work pending.
+ */
+static void mgmt_be_client_proc_msgbufs(struct event *thread)
+{
+	struct mgmt_be_client_ctx *ctx = EVENT_ARG(thread);
+
+	if (mgmt_msg_procbufs(&ctx->mstate, mgmt_be_client_process_msg, ctx,
+			      mgmt_debug_be_client))
+		mgmt_be_client_register_event(ctx, MGMTD_BE_PROC_MSG);
+}
+
+/*
+ * Read event: pull bytes off the connection; disconnect on failure,
+ * otherwise re-arm reading (and message processing if requested).
+ */
+static void mgmt_be_client_read(struct event *thread)
+{
+	struct mgmt_be_client_ctx *ctx = EVENT_ARG(thread);
+	enum mgmt_msg_rsched sched;
+
+	sched = mgmt_msg_read(&ctx->mstate, ctx->conn_fd,
+			      mgmt_debug_be_client);
+	if (sched == MSR_DISCONNECT) {
+		mgmt_be_server_disconnect(ctx, true);
+		return;
+	}
+	if (sched == MSR_SCHED_BOTH)
+		mgmt_be_client_register_event(ctx, MGMTD_BE_PROC_MSG);
+	mgmt_be_client_register_event(ctx, MGMTD_BE_CONN_READ);
+}
+
+/* Arm the write event unless writes are currently paused. */
+static inline void
+mgmt_be_client_sched_msg_write(struct mgmt_be_client_ctx *client_ctx)
+{
+	if (CHECK_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF))
+		return;
+	mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_WRITE);
+}
+
+/* Clear the writes-off flag and kick the write scheduler. */
+static inline void
+mgmt_be_client_writes_on(struct mgmt_be_client_ctx *client_ctx)
+{
+	MGMTD_BE_CLIENT_DBG("Resume writing msgs");
+	UNSET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
+	mgmt_be_client_sched_msg_write(client_ctx);
+}
+
+/* Pause message writes until mgmt_be_client_writes_on() is called. */
+static inline void
+mgmt_be_client_writes_off(struct mgmt_be_client_ctx *client_ctx)
+{
+	SET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
+	MGMTD_BE_CLIENT_DBG("Paused writing msgs");
+}
+
+/*
+ * Pack and queue one protobuf message towards MGMTD, then make sure a
+ * write event is scheduled (unless writes are currently paused).
+ */
+static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
+				   Mgmtd__BeMessage *be_msg)
+{
+	int ret;
+
+	if (client_ctx->conn_fd == -1) {
+		MGMTD_BE_CLIENT_DBG("can't send message on closed connection");
+		return -1;
+	}
+
+	ret = mgmt_msg_send_msg(
+		&client_ctx->mstate, be_msg,
+		mgmtd__be_message__get_packed_size(be_msg),
+		(size_t(*)(void *, void *))mgmtd__be_message__pack,
+		mgmt_debug_be_client);
+	mgmt_be_client_sched_msg_write(client_ctx);
+	return ret;
+}
+
+/*
+ * Write event: flush queued messages; depending on the result either
+ * re-arm writing, disconnect, or pause writes and arm the resume timer.
+ */
+static void mgmt_be_client_write(struct event *thread)
+{
+	struct mgmt_be_client_ctx *ctx = EVENT_ARG(thread);
+	enum mgmt_msg_wsched sched;
+
+	sched = mgmt_msg_write(&ctx->mstate, ctx->conn_fd,
+			       mgmt_debug_be_client);
+	switch (sched) {
+	case MSW_SCHED_STREAM:
+		mgmt_be_client_register_event(ctx, MGMTD_BE_CONN_WRITE);
+		break;
+	case MSW_DISCONNECT:
+		mgmt_be_server_disconnect(ctx, true);
+		break;
+	case MSW_SCHED_WRITES_OFF:
+		mgmt_be_client_writes_off(ctx);
+		mgmt_be_client_register_event(ctx, MGMTD_BE_CONN_WRITES_ON);
+		break;
+	default:
+		assert(sched == MSW_SCHED_NONE);
+		break;
+	}
+}
+
+/* Timer callback: re-enable writes after a WRITES_OFF pause. */
+static void mgmt_be_client_resume_writes(struct event *thread)
+{
+	struct mgmt_be_client_ctx *client_ctx = EVENT_ARG(thread);
+
+	assert(client_ctx && client_ctx->conn_fd != -1);
+	mgmt_be_client_writes_on(client_ctx);
+}
+
+/*
+ * Send a SUBSCR_REQ (subscribe or unsubscribe, per 'subscr_xpaths')
+ * for the given xpaths to MGMTD.
+ */
+static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
+				   bool subscr_xpaths, uint16_t num_reg_xpaths,
+				   char **reg_xpaths)
+{
+	Mgmtd__BeSubscribeReq req;
+	Mgmtd__BeMessage msg;
+
+	mgmtd__be_subscribe_req__init(&req);
+	req.client_name = client_ctx->client_params.name;
+	req.n_xpath_reg = num_reg_xpaths;
+	req.xpath_reg = num_reg_xpaths ? reg_xpaths : NULL;
+	req.subscribe_xpaths = subscr_xpaths;
+
+	mgmtd__be_message__init(&msg);
+	msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ;
+	msg.subscr_req = &req;
+
+	return mgmt_be_client_send_msg(client_ctx, &msg);
+}
+
+/*
+ * Try to connect to the MGMTD backend server.  On success: send the
+ * initial SUBSCRIBE_REQ, start reading, and invoke the client's connect
+ * callback.  On failure: disconnect and schedule a reconnect.
+ */
+static void mgmt_be_server_connect(struct mgmt_be_client_ctx *client_ctx)
+{
+	const char *dbgtag = mgmt_debug_be_client ? "BE-client" : NULL;
+	bool failed;
+
+	assert(client_ctx->conn_fd == -1);
+
+	client_ctx->conn_fd = mgmt_msg_connect(
+		MGMTD_BE_SERVER_PATH, MGMTD_SOCKET_BE_SEND_BUF_SIZE,
+		MGMTD_SOCKET_BE_RECV_BUF_SIZE, dbgtag);
+
+	/* Send SUBSCRIBE_REQ message */
+	failed = client_ctx->conn_fd == -1 ||
+		 mgmt_be_send_subscr_req(client_ctx, false, 0, NULL) != 0;
+	if (failed) {
+		mgmt_be_server_disconnect(client_ctx, true);
+		return;
+	}
+
+	/* Start reading from the socket */
+	mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_READ);
+
+	/* Notify client through registered callback (if any) */
+	if (client_ctx->client_params.client_connect_notify)
+		(void)(*client_ctx->client_params.client_connect_notify)(
+			(uintptr_t)client_ctx,
+			client_ctx->client_params.user_data, true);
+}
+
+/* Retry-timer callback: attempt to (re)connect to the MGMTD server. */
+static void mgmt_be_client_conn_timeout(struct event *thread)
+{
+	struct mgmt_be_client_ctx *client_ctx = EVENT_ARG(thread);
+
+	mgmt_be_server_connect(client_ctx);
+}
+
+/*
+ * (Re)schedule the libfrr event corresponding to 'event' on this
+ * client's event loop.  Only connection/message events are valid here;
+ * the scheduling events assert.
+ */
+static void
+mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
+			      enum mgmt_be_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_BE_CONN_READ:
+		event_add_read(client_ctx->tm, mgmt_be_client_read,
+			       client_ctx, client_ctx->conn_fd,
+			       &client_ctx->conn_read_ev);
+		break;
+	case MGMTD_BE_CONN_WRITE:
+		event_add_write(client_ctx->tm, mgmt_be_client_write,
+				client_ctx, client_ctx->conn_fd,
+				&client_ctx->conn_write_ev);
+		break;
+	case MGMTD_BE_PROC_MSG:
+		/* Short delay so reads can coalesce before processing. */
+		tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
+		event_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
+				   client_ctx, &tv, &client_ctx->msg_proc_ev);
+		break;
+	case MGMTD_BE_CONN_WRITES_ON:
+		event_add_timer_msec(client_ctx->tm,
+				     mgmt_be_client_resume_writes, client_ctx,
+				     MGMTD_BE_MSG_WRITE_DELAY_MSEC,
+				     &client_ctx->conn_writes_on);
+		break;
+	case MGMTD_BE_SERVER:
+	case MGMTD_BE_CONN_INIT:
+	case MGMTD_BE_SCHED_CFG_PREPARE:
+	case MGMTD_BE_RESCHED_CFG_PREPARE:
+	case MGMTD_BE_SCHED_CFG_APPLY:
+	case MGMTD_BE_RESCHED_CFG_APPLY:
+		assert(!"mgmt_be_client_post_event() called incorrectly");
+		break;
+	}
+}
+
+/* Arm the connection-retry timer for 'intvl_secs' seconds. */
+static void
+mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
+				   unsigned long intvl_secs)
+{
+	MGMTD_BE_CLIENT_DBG(
+		"Scheduling MGMTD Backend server connection retry after %lu seconds",
+		intvl_secs);
+	event_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
+			client_ctx, intvl_secs,
+			&client_ctx->conn_retry_tmr);
+}
+
+extern struct nb_config *running_config;
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * Copies 'params' into the (single, static) client context, hooks up
+ * the global running config, allocates a candidate config, and arms an
+ * immediate connection attempt.  Must be called after frr_init() so
+ * that 'running_config' exists, and at most once (asserted via .tm).
+ */
+uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
+				  struct event_loop *master_thread)
+{
+	assert(master_thread && params && strlen(params->name)
+	       && !mgmt_be_client_ctx.tm);
+
+	mgmt_be_client_ctx.tm = master_thread;
+
+	if (!running_config)
+		assert(!"MGMTD Be Client lib_init() after frr_init() only!");
+	mgmt_be_client_ctx.running_config = running_config;
+	mgmt_be_client_ctx.candidate_config = nb_config_new(NULL);
+
+	/* Shallow copy — params must not own heap pointers. */
+	memcpy(&mgmt_be_client_ctx.client_params, params,
+	       sizeof(mgmt_be_client_ctx.client_params));
+	if (!mgmt_be_client_ctx.client_params.conn_retry_intvl_sec)
+		mgmt_be_client_ctx.client_params.conn_retry_intvl_sec =
+			MGMTD_BE_DEFAULT_CONN_RETRY_INTVL_SEC;
+
+	mgmt_be_txns_init(&mgmt_be_client_ctx.txn_head);
+	mgmt_msg_init(&mgmt_be_client_ctx.mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
+		      MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
+		      "BE-client");
+
+	/* Start trying to connect to MGMTD backend server immediately */
+	mgmt_be_client_schedule_conn_retry(&mgmt_be_client_ctx, 1);
+
+	MGMTD_BE_CLIENT_DBG("Initialized client '%s'", params->name);
+
+	return (uintptr_t)&mgmt_be_client_ctx;
+}
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ */
+enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
+					     char *reg_yang_xpaths[],
+					     int num_reg_xpaths)
+{
+	struct mgmt_be_client_ctx *client_ctx =
+		(struct mgmt_be_client_ctx *)lib_hndl;
+
+	if (!client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	if (mgmt_be_send_subscr_req(client_ctx, true, num_reg_xpaths,
+				    reg_yang_xpaths) != 0)
+		return MGMTD_INTERNAL_ERROR;
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Unsubscribe with MGMTD for one or more YANG subtree(s).
+ */
+enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
+					       char *reg_yang_xpaths[],
+					       int num_reg_xpaths)
+{
+	struct mgmt_be_client_ctx *client_ctx;
+
+	client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+	if (!client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	/*
+	 * Treat any non-zero send result as failure, matching
+	 * mgmt_be_subscribe_yang_data() (previously only '< 0' was
+	 * checked here, inconsistently).
+	 */
+	if (mgmt_be_send_subscr_req(client_ctx, false, num_reg_xpaths,
+				    reg_yang_xpaths)
+	    != 0)
+		return MGMTD_INTERNAL_ERROR;
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ *
+ * NOTE: currently a stub — it only validates the library handle and
+ * returns success; 'data_elems'/'num_elems' are not yet used and no
+ * message is actually sent.
+ */
+enum mgmt_result mgmt_be_send_yang_notify(uintptr_t lib_hndl,
+					  Mgmtd__YangData * data_elems[],
+					  int num_elems)
+{
+	struct mgmt_be_client_ctx *client_ctx;
+
+	client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+	if (!client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ */
+void mgmt_be_client_lib_destroy(uintptr_t lib_hndl)
+{
+	struct mgmt_be_client_ctx *client_ctx =
+		(struct mgmt_be_client_ctx *)lib_hndl;
+
+	assert(client_ctx);
+
+	MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'",
+			    client_ctx->client_params.name);
+
+	/* Drop the connection without scheduling a reconnect. */
+	mgmt_be_server_disconnect(client_ctx, false);
+	mgmt_msg_destroy(&client_ctx->mstate);
+
+	/* Cancel all pending events and timers. */
+	EVENT_OFF(client_ctx->conn_retry_tmr);
+	EVENT_OFF(client_ctx->conn_read_ev);
+	EVENT_OFF(client_ctx->conn_write_ev);
+	EVENT_OFF(client_ctx->conn_writes_on);
+	EVENT_OFF(client_ctx->msg_proc_ev);
+
+	/* Release all transaction state. */
+	mgmt_be_cleanup_all_txns(client_ctx);
+	mgmt_be_txns_fini(&client_ctx->txn_head);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_BE_CLIENT_H_
+#define _FRR_MGMTD_BE_CLIENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "northbound.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt_defines.h"
+
+/***************************************************************
+ * Client IDs
+ ***************************************************************/
+
+/*
+ * Add enum value for each supported component, wrap with
+ * #ifdef HAVE_COMPONENT
+ */
+enum mgmt_be_client_id {
+	MGMTD_BE_CLIENT_ID_MIN = 0,
+	MGMTD_BE_CLIENT_ID_INIT = -1,	/* sentinel: no ID assigned yet */
+#ifdef HAVE_STATICD
+	/*
+	 * NOTE(review): this follows ID_INIT (-1), so it equals
+	 * ID_MIN (0) — confirm that is intended.
+	 */
+	MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+	MGMTD_BE_CLIENT_ID_MAX
+};
+
+/* Iterate 'id' over all valid client IDs: [ID_MIN, ID_MAX). */
+#define FOREACH_MGMTD_BE_CLIENT_ID(id)                                        \
+	for ((id) = MGMTD_BE_CLIENT_ID_MIN;                                   \
+	     (id) < MGMTD_BE_CLIENT_ID_MAX; (id)++)
+
+/***************************************************************
+ * Constants
+ ***************************************************************/
+
+#define MGMTD_BE_CLIENT_ERROR_STRING_MAX_LEN 32
+
+#define MGMTD_BE_DEFAULT_CONN_RETRY_INTVL_SEC 5
+
+#define MGMTD_BE_MSG_PROC_DELAY_USEC 10
+#define MGMTD_BE_MAX_NUM_MSG_PROC 500
+
+#define MGMTD_BE_MSG_WRITE_DELAY_MSEC 1
+#define MGMTD_BE_MAX_NUM_MSG_WRITE 1000
+
+#define GMGD_BE_MAX_NUM_REQ_ITEMS 64
+
+#define MGMTD_BE_MSG_MAX_LEN 16384
+
+#define MGMTD_SOCKET_BE_SEND_BUF_SIZE 65535
+#define MGMTD_SOCKET_BE_RECV_BUF_SIZE MGMTD_SOCKET_BE_SEND_BUF_SIZE
+
+#define MGMTD_MAX_CFG_CHANGES_IN_BATCH \
+ ((10 * MGMTD_BE_MSG_MAX_LEN) / \
+ (MGMTD_MAX_XPATH_LEN + MGMTD_MAX_YANG_VALUE_LEN))
+
+/*
+ * Only about 80% of MGMTD_BE_MSG_MAX_LEN should be filled with config
+ * data, since google protobuf encoding adds per-message overhead on
+ * top of the payload.
+ */
+#define MGMTD_BE_CFGDATA_PACKING_EFFICIENCY 0.8
+#define MGMTD_BE_CFGDATA_MAX_MSG_LEN \
+ (MGMTD_BE_MSG_MAX_LEN * MGMTD_BE_CFGDATA_PACKING_EFFICIENCY)
+
+/* Parenthesize the whole expansion so the macro composes safely in
+ * surrounding arithmetic expressions. */
+#define MGMTD_BE_MAX_BATCH_IDS_IN_REQ                                         \
+	((MGMTD_BE_MSG_MAX_LEN - 128) / sizeof(uint64_t))
+
+#define MGMTD_BE_CONTAINER_NODE_VAL "<<container>>"
+
+/***************************************************************
+ * Data-structures
+ ***************************************************************/
+
+#define MGMTD_BE_MAX_CLIENTS_PER_XPATH_REG 32
+
+/* Per-transaction context handed to client callbacks. */
+struct mgmt_be_client_txn_ctx {
+	uintptr_t *user_ctx;	/* opaque, owned by the client */
+};
+
+/*
+ * All the client-specific information this library needs to
+ * initialize itself, setup connection with MGMTD BackEnd interface
+ * and carry on all required procedures appropriately.
+ *
+ * BackEnd clients need to initialise an instance of this structure
+ * with appropriate data and pass it while calling the API
+ * to initialize the library (See mgmt_be_client_lib_init for
+ * more details).
+ */
+struct mgmt_be_client_params {
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+	uintptr_t user_data;	/* passed back to every callback */
+	unsigned long conn_retry_intvl_sec;	/* 0 => library default */
+
+	/* Invoked on connect/disconnect to the MGMTD server. */
+	void (*client_connect_notify)(uintptr_t lib_hndl,
+				      uintptr_t usr_data,
+				      bool connected);
+
+	/* Invoked with the result of an xpath (un)subscription. */
+	void (*client_subscribe_notify)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct nb_yang_xpath **xpath,
+		enum mgmt_result subscribe_result[], int num_paths);
+
+	/* Invoked when a transaction is created or destroyed. */
+	void (*txn_notify)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed);
+
+	/* Validate one config data item within a transaction. */
+	enum mgmt_result (*data_validate)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx,
+		struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+		bool delete, char *error_if_any);
+
+	/* Apply one config data item within a transaction. */
+	enum mgmt_result (*data_apply)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx,
+		struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+		bool delete);
+
+	/* Fetch a single operational data element. */
+	enum mgmt_result (*get_data_elem)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx,
+		struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem);
+
+	/* Fetch a batch of operational data under an xpath. */
+	enum mgmt_result (*get_data)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx,
+		struct nb_yang_xpath *xpath, bool keys_only,
+		struct nb_yang_xpath_elem **elems, int *num_elems,
+		int *next_key);
+
+	/* Continue a previous get_data() iteration. */
+	enum mgmt_result (*get_next_data)(
+		uintptr_t lib_hndl, uintptr_t usr_data,
+		struct mgmt_be_client_txn_ctx *txn_ctx,
+		struct nb_yang_xpath *xpath, bool keys_only,
+		struct nb_yang_xpath_elem **elems, int *num_elems);
+};
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1];
+
+/*
+ * Map a backend client ID to its name; out-of-range IDs map to the
+ * ID_MAX sentinel entry.  Also clamp negative IDs (e.g.
+ * MGMTD_BE_CLIENT_ID_INIT == -1), which would otherwise index before
+ * the start of mgmt_be_client_names[].
+ */
+static inline const char *mgmt_be_client_id2name(enum mgmt_be_client_id id)
+{
+	if (id < MGMTD_BE_CLIENT_ID_MIN || id > MGMTD_BE_CLIENT_ID_MAX)
+		id = MGMTD_BE_CLIENT_ID_MAX;
+	return mgmt_be_client_names[id];
+}
+
+/* Reverse lookup: client name -> ID; ID_MAX when no name matches. */
+static inline enum mgmt_be_client_id
+mgmt_be_client_name2id(const char *name)
+{
+	enum mgmt_be_client_id id;
+
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		if (strncmp(mgmt_be_client_names[id], name,
+			    MGMTD_CLIENT_NAME_MAX_LEN) == 0)
+			return id;
+	}
+
+	return MGMTD_BE_CLIENT_ID_MAX;
+}
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * params
+ * Backend client parameters.
+ *
+ * master_thread
+ * Thread master.
+ *
+ * Returns:
+ * Backend client lib handler (nothing but address of mgmt_be_client_ctx)
+ */
+extern uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
+ struct event_loop *master_thread);
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * reg_yang_xpaths
+ * Yang xpath(s) that needs to be subscribed to.
+ *
+ * num_xpaths
+ * Number of xpaths
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
+ char **reg_yang_xpaths,
+ int num_xpaths);
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * data_elems
+ * Yang data elements from data tree.
+ *
+ * num_elems
+ * Number of data elements.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_be_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems,
+ int num_elems);
+
+/*
+ * Un-subscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * reg_yang_xpaths
+ * Yang xpath(s) that needs to be un-subscribed from.
+ *
+ * num_reg_xpaths
+ * Number of subscribed xpaths
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
+ char **reg_yang_xpaths,
+ int num_reg_xpaths);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_be_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_BE_CLIENT_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "memory.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_CLIENT_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_CLIENT_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_fe_client) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+struct mgmt_fe_client_ctx;
+
+PREDECL_LIST(mgmt_sessions);
+
+/* One frontend session tracked by the client library. */
+struct mgmt_fe_client_session {
+	uint64_t client_id;	/* ID chosen by the client application */
+	uint64_t session_id;	/* ID assigned by the MGMTD server */
+	struct mgmt_fe_client_ctx *client_ctx;	/* back-pointer to owner */
+	uintptr_t user_ctx;	/* opaque client cookie */
+
+	struct mgmt_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage);
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session");
+
+/* Singleton state for the frontend client library. */
+struct mgmt_fe_client_ctx {
+	int conn_fd;	/* -1 while disconnected */
+	struct event_loop *tm;
+	struct event *conn_retry_tmr;
+	struct event *conn_read_ev;
+	struct event *conn_write_ev;
+	struct event *conn_writes_on;
+	struct event *msg_proc_ev;
+	uint32_t flags;	/* MGMTD_FE_CLIENT_FLAGS_* */
+
+	struct mgmt_msg_state mstate;
+
+	struct mgmt_fe_client_params client_params;
+
+	struct mgmt_sessions_head client_sessions;
+};
+
+#define MGMTD_FE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+
+/* Deletion-safe iteration (frr_each_safe) over all client sessions. */
+#define FOREACH_SESSION_IN_LIST(client_ctx, session)                          \
+	frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session))
+
+static bool mgmt_debug_fe_client;
+
+static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = {
+	.conn_fd = -1,
+};
+
+/* Forward declarations */
+static void
+mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
+ enum mgmt_fe_event event);
+static void mgmt_fe_client_schedule_conn_retry(
+ struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs);
+
+/* Find the client session matching 'client_id'; NULL if none. */
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
+				  uint64_t client_id)
+{
+	struct mgmt_fe_client_session *sess;
+
+	FOREACH_SESSION_IN_LIST (client_ctx, sess) {
+		if (sess->client_id != client_id)
+			continue;
+		MGMTD_FE_CLIENT_DBG(
+			"Found session %p for client-id %llu.", sess,
+			(unsigned long long)client_id);
+		return sess;
+	}
+
+	return NULL;
+}
+
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
+ uint64_t session_id)
+{
+ struct mgmt_fe_client_session *session;
+
+ FOREACH_SESSION_IN_LIST (client_ctx, session) {
+ if (session->session_id == session_id) {
+ MGMTD_FE_CLIENT_DBG(
+ "Found session %p for session-id %llu.", session,
+ (unsigned long long)session_id);
+ return session;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+mgmt_fe_server_disconnect(struct mgmt_fe_client_ctx *client_ctx,
+ bool reconnect)
+{
+ if (client_ctx->conn_fd != -1) {
+ close(client_ctx->conn_fd);
+ client_ctx->conn_fd = -1;
+ }
+
+ if (reconnect)
+ mgmt_fe_client_schedule_conn_retry(
+ client_ctx,
+ client_ctx->client_params.conn_retry_intvl_sec);
+}
+
+static inline void
+mgmt_fe_client_sched_msg_write(struct mgmt_fe_client_ctx *client_ctx)
+{
+ if (!CHECK_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF))
+ mgmt_fe_client_register_event(client_ctx,
+ MGMTD_FE_CONN_WRITE);
+}
+
+static inline void
+mgmt_fe_client_writes_on(struct mgmt_fe_client_ctx *client_ctx)
+{
+ MGMTD_FE_CLIENT_DBG("Resume writing msgs");
+ UNSET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+ mgmt_fe_client_sched_msg_write(client_ctx);
+}
+
+static inline void
+mgmt_fe_client_writes_off(struct mgmt_fe_client_ctx *client_ctx)
+{
+ SET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+ MGMTD_FE_CLIENT_DBG("Paused writing msgs");
+}
+
+static int mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx,
+ Mgmtd__FeMessage *fe_msg)
+{
+ /* users current expect this to fail here */
+ if (client_ctx->conn_fd == -1) {
+ MGMTD_FE_CLIENT_DBG("can't send message on closed connection");
+ return -1;
+ }
+
+ int rv = mgmt_msg_send_msg(
+ &client_ctx->mstate, fe_msg,
+ mgmtd__fe_message__get_packed_size(fe_msg),
+ (size_t(*)(void *, void *))mgmtd__fe_message__pack,
+ mgmt_debug_fe_client);
+ mgmt_fe_client_sched_msg_write(client_ctx);
+ return rv;
+}
+
+static void mgmt_fe_client_write(struct event *thread)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ enum mgmt_msg_wsched rv;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
+ rv = mgmt_msg_write(&client_ctx->mstate, client_ctx->conn_fd,
+ mgmt_debug_fe_client);
+ if (rv == MSW_SCHED_STREAM)
+ mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_WRITE);
+ else if (rv == MSW_DISCONNECT)
+ mgmt_fe_server_disconnect(client_ctx, true);
+ else if (rv == MSW_SCHED_WRITES_OFF) {
+ mgmt_fe_client_writes_off(client_ctx);
+ mgmt_fe_client_register_event(client_ctx,
+ MGMTD_FE_CONN_WRITES_ON);
+ } else
+ assert(rv == MSW_SCHED_NONE);
+}
+
+static void mgmt_fe_client_resume_writes(struct event *thread)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
+ assert(client_ctx && client_ctx->conn_fd != -1);
+
+ mgmt_fe_client_writes_on(client_ctx);
+}
+
+static int
+mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeRegisterReq rgstr_req;
+
+ mgmtd__fe_register_req__init(&rgstr_req);
+ rgstr_req.client_name = client_ctx->client_params.name;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ;
+ fe_msg.register_req = &rgstr_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending REGISTER_REQ message to MGMTD Frontend server");
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session,
+ bool create)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeSessionReq sess_req;
+
+ mgmtd__fe_session_req__init(&sess_req);
+ sess_req.create = create;
+ if (create) {
+ sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID;
+ sess_req.client_conn_id = session->client_id;
+ } else {
+ sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_SESSION_ID;
+ sess_req.session_id = session->session_id;
+ }
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ;
+ fe_msg.session_req = &sess_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending SESSION_REQ message for %s session %llu to MGMTD Frontend server",
+ create ? "creating" : "destroying",
+ (unsigned long long)session->client_id);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session, bool lock,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeLockDsReq lockds_req;
+
+ mgmtd__fe_lock_ds_req__init(&lockds_req);
+ lockds_req.session_id = session->session_id;
+ lockds_req.req_id = req_id;
+ lockds_req.ds_id = ds_id;
+ lockds_req.lock = lock;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ;
+ fe_msg.lockds_req = &lockds_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending %sLOCK_REQ message for Ds:%d session %llu to MGMTD Frontend server",
+ lock ? "" : "UN", ds_id, (unsigned long long)session->client_id);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **data_req, int num_data_reqs,
+ bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeSetConfigReq setcfg_req;
+
+ mgmtd__fe_set_config_req__init(&setcfg_req);
+ setcfg_req.session_id = session->session_id;
+ setcfg_req.ds_id = ds_id;
+ setcfg_req.req_id = req_id;
+ setcfg_req.data = data_req;
+ setcfg_req.n_data = (size_t)num_data_reqs;
+ setcfg_req.implicit_commit = implicit_commit;
+ setcfg_req.commit_ds_id = dst_ds_id;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ;
+ fe_msg.setcfg_req = &setcfg_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending SET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+ ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session,
+ uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dest_ds_id, bool validate_only,
+ bool abort)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeCommitConfigReq commitcfg_req;
+
+ mgmtd__fe_commit_config_req__init(&commitcfg_req);
+ commitcfg_req.session_id = session->session_id;
+ commitcfg_req.src_ds_id = src_ds_id;
+ commitcfg_req.dst_ds_id = dest_ds_id;
+ commitcfg_req.req_id = req_id;
+ commitcfg_req.validate_only = validate_only;
+ commitcfg_req.abort = abort;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ;
+ fe_msg.commcfg_req = &commitcfg_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session %llu to MGMTD Frontend server",
+ src_ds_id, dest_ds_id, (unsigned long long)session->client_id);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq * data_req[],
+ int num_data_reqs)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeGetConfigReq getcfg_req;
+
+ mgmtd__fe_get_config_req__init(&getcfg_req);
+ getcfg_req.session_id = session->session_id;
+ getcfg_req.ds_id = ds_id;
+ getcfg_req.req_id = req_id;
+ getcfg_req.data = data_req;
+ getcfg_req.n_data = (size_t)num_data_reqs;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ;
+ fe_msg.getcfg_req = &getcfg_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+ ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq * data_req[],
+ int num_data_reqs)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeGetDataReq getdata_req;
+
+ mgmtd__fe_get_data_req__init(&getdata_req);
+ getdata_req.session_id = session->session_id;
+ getdata_req.ds_id = ds_id;
+ getdata_req.req_id = req_id;
+ getdata_req.data = data_req;
+ getdata_req.n_data = (size_t)num_data_reqs;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ;
+ fe_msg.getdata_req = &getdata_req;
+
+ MGMTD_FE_CLIENT_DBG(
+ "Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+ ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int mgmt_fe_send_regnotify_req(
+ struct mgmt_fe_client_ctx *client_ctx,
+ struct mgmt_fe_client_session *session, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool register_req,
+ Mgmtd__YangDataXPath * data_req[], int num_data_reqs)
+{
+ (void)req_id;
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeRegisterNotifyReq regntfy_req;
+
+ mgmtd__fe_register_notify_req__init(®ntfy_req);
+ regntfy_req.session_id = session->session_id;
+ regntfy_req.ds_id = ds_id;
+ regntfy_req.register_req = register_req;
+ regntfy_req.data_xpath = data_req;
+ regntfy_req.n_data_xpath = (size_t)num_data_reqs;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ;
+ fe_msg.regnotify_req = ®ntfy_req;
+
+ return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
+ Mgmtd__FeMessage *fe_msg)
+{
+ struct mgmt_fe_client_session *session = NULL;
+
+ /*
+ * protobuf-c adds a max size enum with an internal, and changing by
+ * version, name; cast to an int to avoid unhandled enum warnings
+ */
+ switch ((int)fe_msg->message_case) {
+ case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+ if (fe_msg->session_reply->create
+ && fe_msg->session_reply->has_client_conn_id) {
+ MGMTD_FE_CLIENT_DBG(
+ "Got Session Create Reply Msg for client-id %llu with session-id: %llu.",
+ (unsigned long long)
+ fe_msg->session_reply->client_conn_id,
+ (unsigned long long)
+ fe_msg->session_reply->session_id);
+
+ session = mgmt_fe_find_session_by_client_id(
+ client_ctx,
+ fe_msg->session_reply->client_conn_id);
+
+ if (session && fe_msg->session_reply->success) {
+ MGMTD_FE_CLIENT_DBG(
+ "Session Create for client-id %llu successful.",
+ (unsigned long long)fe_msg
+ ->session_reply->client_conn_id);
+ session->session_id =
+ fe_msg->session_reply->session_id;
+ } else {
+ MGMTD_FE_CLIENT_ERR(
+ "Session Create for client-id %llu failed.",
+ (unsigned long long)fe_msg
+ ->session_reply->client_conn_id);
+ }
+ } else if (!fe_msg->session_reply->create) {
+ MGMTD_FE_CLIENT_DBG(
+ "Got Session Destroy Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->session_reply->session_id);
+
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->session_req->session_id);
+ }
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .client_session_notify)
+ (*session->client_ctx->client_params
+ .client_session_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id,
+ fe_msg->session_reply->create,
+ fe_msg->session_reply->success,
+ (uintptr_t)session, session->user_ctx);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+ MGMTD_FE_CLIENT_DBG(
+ "Got LockDs Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->lockds_reply->session_id);
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->lockds_reply->session_id);
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .lock_ds_notify)
+ (*session->client_ctx->client_params
+ .lock_ds_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id, (uintptr_t)session,
+ session->user_ctx,
+ fe_msg->lockds_reply->req_id,
+ fe_msg->lockds_reply->lock,
+ fe_msg->lockds_reply->success,
+ fe_msg->lockds_reply->ds_id,
+ fe_msg->lockds_reply->error_if_any);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+ MGMTD_FE_CLIENT_DBG(
+ "Got Set Config Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->setcfg_reply->session_id);
+
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->setcfg_reply->session_id);
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .set_config_notify)
+ (*session->client_ctx->client_params
+ .set_config_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id, (uintptr_t)session,
+ session->user_ctx,
+ fe_msg->setcfg_reply->req_id,
+ fe_msg->setcfg_reply->success,
+ fe_msg->setcfg_reply->ds_id,
+ fe_msg->setcfg_reply->error_if_any);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+ MGMTD_FE_CLIENT_DBG(
+ "Got Commit Config Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->commcfg_reply->session_id);
+
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->commcfg_reply->session_id);
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .commit_config_notify)
+ (*session->client_ctx->client_params
+ .commit_config_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id, (uintptr_t)session,
+ session->user_ctx,
+ fe_msg->commcfg_reply->req_id,
+ fe_msg->commcfg_reply->success,
+ fe_msg->commcfg_reply->src_ds_id,
+ fe_msg->commcfg_reply->dst_ds_id,
+ fe_msg->commcfg_reply->validate_only,
+ fe_msg->commcfg_reply->error_if_any);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+ MGMTD_FE_CLIENT_DBG(
+ "Got Get Config Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->getcfg_reply->session_id);
+
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->getcfg_reply->session_id);
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .get_data_notify)
+ (*session->client_ctx->client_params
+ .get_data_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id, (uintptr_t)session,
+ session->user_ctx,
+ fe_msg->getcfg_reply->req_id,
+ fe_msg->getcfg_reply->success,
+ fe_msg->getcfg_reply->ds_id,
+ fe_msg->getcfg_reply->data
+ ? fe_msg->getcfg_reply->data->data
+ : NULL,
+ fe_msg->getcfg_reply->data
+ ? fe_msg->getcfg_reply->data->n_data
+ : 0,
+ fe_msg->getcfg_reply->data
+ ? fe_msg->getcfg_reply->data
+ ->next_indx
+ : 0,
+ fe_msg->getcfg_reply->error_if_any);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+ MGMTD_FE_CLIENT_DBG(
+ "Got Get Data Reply Msg for session-id %llu",
+ (unsigned long long)
+ fe_msg->getdata_reply->session_id);
+
+ session = mgmt_fe_find_session_by_session_id(
+ client_ctx, fe_msg->getdata_reply->session_id);
+
+ if (session && session->client_ctx
+ && session->client_ctx->client_params
+ .get_data_notify)
+ (*session->client_ctx->client_params
+ .get_data_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data,
+ session->client_id, (uintptr_t)session,
+ session->user_ctx,
+ fe_msg->getdata_reply->req_id,
+ fe_msg->getdata_reply->success,
+ fe_msg->getdata_reply->ds_id,
+ fe_msg->getdata_reply->data
+ ? fe_msg->getdata_reply->data->data
+ : NULL,
+ fe_msg->getdata_reply->data
+ ? fe_msg->getdata_reply->data
+ ->n_data
+ : 0,
+ fe_msg->getdata_reply->data
+ ? fe_msg->getdata_reply->data
+ ->next_indx
+ : 0,
+ fe_msg->getdata_reply->error_if_any);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+ /*
+ * TODO: Add handling code in future.
+ */
+ break;
+ /*
+ * NOTE: The following messages are always sent from Frontend
+ * clients to MGMTd only and/or need not be handled here.
+ */
+ case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+ default:
+ /*
+ * A 'default' case is being added contrary to the
+ * FRR code guidelines to take care of build
+ * failures on certain build systems (courtesy of
+ * the proto-c package).
+ */
+ break;
+ }
+
+ return 0;
+}
+
+static void mgmt_fe_client_process_msg(void *user_ctx, uint8_t *data,
+ size_t len)
+{
+ struct mgmt_fe_client_ctx *client_ctx = user_ctx;
+ Mgmtd__FeMessage *fe_msg;
+
+ fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+ if (!fe_msg) {
+ MGMTD_FE_CLIENT_DBG("Failed to decode %zu bytes from server.",
+ len);
+ return;
+ }
+ MGMTD_FE_CLIENT_DBG(
+ "Decoded %zu bytes of message(msg: %u/%u) from server", len,
+ fe_msg->message_case, fe_msg->message_case);
+ (void)mgmt_fe_client_handle_msg(client_ctx, fe_msg);
+ mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
+static void mgmt_fe_client_proc_msgbufs(struct event *thread)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
+ if (mgmt_msg_procbufs(&client_ctx->mstate, mgmt_fe_client_process_msg,
+ client_ctx, mgmt_debug_fe_client))
+ mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
+}
+
+static void mgmt_fe_client_read(struct event *thread)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ enum mgmt_msg_rsched rv;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
+
+ rv = mgmt_msg_read(&client_ctx->mstate, client_ctx->conn_fd,
+ mgmt_debug_fe_client);
+ if (rv == MSR_DISCONNECT) {
+ mgmt_fe_server_disconnect(client_ctx, true);
+ return;
+ }
+ if (rv == MSR_SCHED_BOTH)
+ mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
+ mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+}
+
+static void mgmt_fe_server_connect(struct mgmt_fe_client_ctx *client_ctx)
+{
+ const char *dbgtag = mgmt_debug_fe_client ? "FE-client" : NULL;
+
+ assert(client_ctx->conn_fd == -1);
+ client_ctx->conn_fd = mgmt_msg_connect(
+ MGMTD_FE_SERVER_PATH, MGMTD_SOCKET_FE_SEND_BUF_SIZE,
+ MGMTD_SOCKET_FE_RECV_BUF_SIZE, dbgtag);
+
+ /* Send REGISTER_REQ message */
+ if (client_ctx->conn_fd == -1 ||
+ mgmt_fe_send_register_req(client_ctx) != 0) {
+ mgmt_fe_server_disconnect(client_ctx, true);
+ return;
+ }
+
+ /* Start reading from the socket */
+ mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+
+ /* Notify client through registered callback (if any) */
+ if (client_ctx->client_params.client_connect_notify)
+ (void)(*client_ctx->client_params.client_connect_notify)(
+ (uintptr_t)client_ctx,
+ client_ctx->client_params.user_data, true);
+}
+
+
/* Retry-timer callback: attempt the server connection again. */
static void mgmt_fe_client_conn_timeout(struct event *thread)
{
	struct mgmt_fe_client_ctx *ctx = EVENT_ARG(thread);

	mgmt_fe_server_connect(ctx);
}
+
/*
 * Arm the event-loop handler corresponding to 'event'.  READ/WRITE map
 * to socket events on conn_fd; PROC_MSG and CONN_WRITES_ON are short
 * timers that defer work back to the loop.  MGMTD_FE_SERVER is a
 * listen-side event and must never be requested here.
 */
static void
mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
				  enum mgmt_fe_event event)
{
	struct timeval tv = {0};

	switch (event) {
	case MGMTD_FE_CONN_READ:
		event_add_read(client_ctx->tm, mgmt_fe_client_read,
				   client_ctx, client_ctx->conn_fd,
				   &client_ctx->conn_read_ev);
		break;
	case MGMTD_FE_CONN_WRITE:
		event_add_write(client_ctx->tm, mgmt_fe_client_write,
				    client_ctx, client_ctx->conn_fd,
				    &client_ctx->conn_write_ev);
		break;
	case MGMTD_FE_PROC_MSG:
		/* Small delay batches message processing. */
		tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
		event_add_timer_tv(client_ctx->tm,
				       mgmt_fe_client_proc_msgbufs, client_ctx,
				       &tv, &client_ctx->msg_proc_ev);
		break;
	case MGMTD_FE_CONN_WRITES_ON:
		/* Resume writing shortly after back-pressure eases. */
		event_add_timer_msec(
			client_ctx->tm, mgmt_fe_client_resume_writes,
			client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
			&client_ctx->conn_writes_on);
		break;
	case MGMTD_FE_SERVER:
		assert(!"mgmt_fe_client_ctx_post_event called incorrectly");
		break;
	}
}
+
+static void mgmt_fe_client_schedule_conn_retry(
+ struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs)
+{
+ MGMTD_FE_CLIENT_DBG(
+ "Scheduling MGMTD Frontend server connection retry after %lu seconds",
+ intvl_secs);
+ event_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
+ (void *)client_ctx, intvl_secs,
+ &client_ctx->conn_retry_tmr);
+}
+
/*
 * Initialize library and try connecting with MGMTD.
 *
 * Initializes the single static client instance (one connection per
 * process -- asserting tm is unset guards against double init), copies
 * the caller's params, and kicks off the first connect attempt via a
 * 1-second timer.  Returns the instance address as an opaque handle.
 */
uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
				      struct event_loop *master_thread)
{
	assert(master_thread && params && strlen(params->name)
	       && !mgmt_fe_client_ctx.tm);

	mgmt_fe_client_ctx.tm = master_thread;
	memcpy(&mgmt_fe_client_ctx.client_params, params,
	       sizeof(mgmt_fe_client_ctx.client_params));
	/* Fall back to the default retry interval when the caller gave 0. */
	if (!mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec)
		mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec =
			MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC;

	mgmt_msg_init(&mgmt_fe_client_ctx.mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
		      MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
		      "FE-client");

	mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions);

	/* Start trying to connect to MGMTD frontend server immediately */
	mgmt_fe_client_schedule_conn_retry(&mgmt_fe_client_ctx, 1);

	MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name);

	return (uintptr_t)&mgmt_fe_client_ctx;
}
+
+/*
+ * Create a new Session for a Frontend Client connection.
+ */
+enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+ uint64_t client_id,
+ uintptr_t user_ctx)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+ sizeof(struct mgmt_fe_client_session));
+ assert(session);
+ session->user_ctx = user_ctx;
+ session->client_id = client_id;
+ session->client_ctx = client_ctx;
+ session->session_id = 0;
+
+ if (mgmt_fe_send_session_req(client_ctx, session, true) != 0) {
+ XFREE(MTYPE_MGMTD_FE_SESSION, session);
+ return MGMTD_INTERNAL_ERROR;
+ }
+ mgmt_sessions_add_tail(&client_ctx->client_sessions, session);
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ */
+enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+ uint64_t client_id)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = mgmt_fe_find_session_by_client_id(client_ctx, client_id);
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (session->session_id &&
+ mgmt_fe_send_session_req(client_ctx, session, false) != 0)
+ MGMTD_FE_CLIENT_ERR(
+ "Failed to send session destroy request for the session-id %lu",
+ (unsigned long)session->session_id);
+
+ mgmt_sessions_del(&client_ctx->client_sessions, session);
+ XFREE(MTYPE_MGMTD_FE_SESSION, session);
+
+ return MGMTD_SUCCESS;
+}
+
/*
 * Destroy every session on this client.  Safe to call while iterating
 * because FOREACH_SESSION_IN_LIST expands to frr_each_safe, which
 * tolerates the current element being freed by
 * mgmt_fe_destroy_client_session().
 */
static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl)
{
	struct mgmt_fe_client_ctx *client_ctx;
	struct mgmt_fe_client_session *session;

	client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
	if (!client_ctx)
		return;

	FOREACH_SESSION_IN_LIST (client_ctx, session)
		mgmt_fe_destroy_client_session(lib_hndl, session->client_id);
}
+
+/*
+ * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
+ */
+enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool lock_ds)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_lockds_req(client_ctx, session, lock_ds, req_id,
+ ds_id)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ */
+enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **config_req, int num_reqs,
+ bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_setcfg_req(client_ctx, session, req_id, ds_id,
+ config_req, num_reqs, implicit_commit,
+ dst_ds_id)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ */
+enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl,
+ uintptr_t session_id,
+ uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ bool validate_only, bool abort)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_commitcfg_req(client_ctx, session, req_id, src_ds_id,
+ dst_ds_id, validate_only, abort)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ */
+enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq * data_req[], int num_reqs)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_getcfg_req(client_ctx, session, req_id, ds_id,
+ data_req, num_reqs)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more config data item(s).
+ */
+enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq * data_req[],
+ int num_reqs)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_getdata_req(client_ctx, session, req_id, ds_id,
+ data_req, num_reqs)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
+ */
+enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool register_req,
+ Mgmtd__YangDataXPath * data_req[],
+ int num_reqs)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client_session *session;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ if (!client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ session = (struct mgmt_fe_client_session *)session_id;
+ if (!session || session->client_ctx != client_ctx)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_fe_send_regnotify_req(client_ctx, session, req_id, ds_id,
+ register_req, data_req, num_reqs)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ */
+void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl)
+{
+ struct mgmt_fe_client_ctx *client_ctx;
+
+ client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+ assert(client_ctx);
+
+ MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'",
+ client_ctx->client_params.name);
+
+ mgmt_fe_server_disconnect(client_ctx, false);
+
+ mgmt_fe_destroy_client_sessions(lib_hndl);
+
+ EVENT_OFF(client_ctx->conn_retry_tmr);
+ EVENT_OFF(client_ctx->conn_read_ev);
+ EVENT_OFF(client_ctx->conn_write_ev);
+ EVENT_OFF(client_ctx->conn_writes_on);
+ EVENT_OFF(client_ctx->msg_proc_ev);
+ mgmt_msg_destroy(&client_ctx->mstate);
+}
--- /dev/null
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MGMTD Frontend Client Library api interfaces
 * Copyright (C) 2021 Vmware, Inc.
 *		       Pushpasis Sarkar <spushpasis@vmware.com>
 */

#ifndef _FRR_MGMTD_FE_CLIENT_H_
#define _FRR_MGMTD_FE_CLIENT_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "mgmt_pb.h"
#include "frrevent.h"
#include "mgmtd/mgmt_defines.h"

/***************************************************************
 * Macros
 ***************************************************************/

/*
 * The server port MGMTD daemon is listening for Frontend Client
 * connections.
 */

#define MGMTD_FE_CLIENT_ERROR_STRING_MAX_LEN 32

/* Seconds between reconnect attempts when the caller specifies 0. */
#define MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC 5

/* Message-processing batching delay and batch size. */
#define MGMTD_FE_MSG_PROC_DELAY_USEC 10
#define MGMTD_FE_MAX_NUM_MSG_PROC 500

/* Write-resume delay and per-wakeup write batch size. */
#define MGMTD_FE_MSG_WRITE_DELAY_MSEC 1
#define MGMTD_FE_MAX_NUM_MSG_WRITE 100

/* NOTE(review): "GMGD_" looks like a typo for "MGMTD_" -- confirm no
 * other file references this spelling before renaming. */
#define GMGD_FE_MAX_NUM_REQ_ITEMS 64

#define MGMTD_FE_MSG_MAX_LEN 9000

#define MGMTD_SOCKET_FE_SEND_BUF_SIZE 65535
#define MGMTD_SOCKET_FE_RECV_BUF_SIZE MGMTD_SOCKET_FE_SEND_BUF_SIZE
+
/***************************************************************
 * Data-structures
 ***************************************************************/

#define MGMTD_SESSION_ID_NONE 0

#define MGMTD_CLIENT_ID_NONE 0

/* Convenience aliases for the protobuf datastore-id enum values. */
#define MGMTD_DS_NONE MGMTD__DATASTORE_ID__DS_NONE
#define MGMTD_DS_RUNNING MGMTD__DATASTORE_ID__RUNNING_DS
#define MGMTD_DS_CANDIDATE MGMTD__DATASTORE_ID__CANDIDATE_DS
#define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS
/* Parenthesized so expansion is safe inside any expression context. */
#define MGMTD_DS_MAX_ID (MGMTD_DS_OPERATIONAL + 1)
+
/*
 * All the client specific information this library needs to
 * initialize itself, setup connection with MGMTD FrontEnd interface
 * and carry on all required procedures appropriately.
 *
 * FrontEnd clients need to initialise a instance of this structure
 * with appropriate data and pass it while calling the API
 * to initialize the library (See mgmt_fe_client_lib_init for
 * more details).
 */
struct mgmt_fe_client_params {
	char name[MGMTD_CLIENT_NAME_MAX_LEN]; /* sent in REGISTER_REQ */
	uintptr_t user_data; /* opaque, echoed as user_data in callbacks */
	unsigned long conn_retry_intvl_sec; /* 0 selects the default */

	/* Called when the connection to MGMTD comes up. */
	void (*client_connect_notify)(uintptr_t lib_hndl,
				      uintptr_t user_data,
				      bool connected);

	/* Called on SESSION_REPLY for both create and destroy. */
	void (*client_session_notify)(uintptr_t lib_hndl,
				      uintptr_t user_data,
				      uint64_t client_id,
				      bool create, bool success,
				      uintptr_t session_id,
				      uintptr_t user_session_ctx);

	/* Called on LOCKDS_REPLY (lock and unlock). */
	void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data,
			       uint64_t client_id, uintptr_t session_id,
			       uintptr_t user_session_ctx, uint64_t req_id,
			       bool lock_ds, bool success,
			       Mgmtd__DatastoreId ds_id, char *errmsg_if_any);

	/* Called on SETCFG_REPLY. */
	void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data,
				  uint64_t client_id, uintptr_t session_id,
				  uintptr_t user_session_ctx, uint64_t req_id,
				  bool success, Mgmtd__DatastoreId ds_id,
				  char *errmsg_if_any);

	/* Called on COMMCFG_REPLY. */
	void (*commit_config_notify)(
		uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
		uintptr_t session_id, uintptr_t user_session_ctx,
		uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id,
		Mgmtd__DatastoreId dst_ds_id, bool validate_only,
		char *errmsg_if_any);

	/* Called on both GETCFG_REPLY and GETDATA_REPLY. */
	enum mgmt_result (*get_data_notify)(
		uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
		uintptr_t session_id, uintptr_t user_session_ctx,
		uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id,
		Mgmtd__YangData **yang_data, size_t num_data, int next_key,
		char *errmsg_if_any);

	/* Reserved for async data notifications (not yet wired up). */
	enum mgmt_result (*data_notify)(
		uint64_t client_id, uint64_t session_id, uintptr_t user_data,
		uint64_t req_id, Mgmtd__DatastoreId ds_id,
		Mgmtd__YangData **yang_data, size_t num_data);
};
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD FrontEnd interface.
+ *
+ * params
+ * Frontend client parameters.
+ *
+ * master_thread
+ * Thread master.
+ *
+ * Returns:
+ * Frontend client lib handler (nothing but address of mgmt_fe_client_ctx)
+ */
+extern uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
+ struct event_loop *master_thread);
+
+/*
+ * Create a new Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * client_id
+ * Unique identifier of client.
+ *
+ * user_ctx
+ * Client context.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+ uint64_t client_id,
+ uintptr_t user_ctx);
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * client_id
+ * Unique identifier of client.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+ uint64_t client_id);
+
+/*
+ * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * lock_ds
+ * TRUE for lock request, FALSE for unlock request.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool lock_ds);
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * config_req
+ * Details regarding the SET_CONFIG_REQ.
+ *
+ * num_req
+ * Number of config requests.
+ *
+ * implicit_commit
+ * TRUE for implicit commit, FALSE otherwise.
+ *
+ * dst_ds_id
+ * Destination Datastore ID where data needs to be set.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **config_req, int num_req,
+ bool implicit_commit, Mgmtd__DatastoreId dst_ds_id);
+
+/*
+ * Send COMMIT_CONFIG_REQ to MGMTD to commit one or more config data(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * src_ds_id
+ * Source datastore ID from where data needs to be committed from.
+ *
+ * dst_ds_id
+ * Destination datastore ID where data needs to be committed to.
+ *
+ * validate_only
+ * TRUE if data needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ * TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_commit_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, bool validate_only,
+ bool abort);
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate)
+ *
+ * data_req
+ * Get config requested.
+ *
+ * num_reqs
+ * Number of get config requests.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req, int num_reqs);
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more data item(s).
+ *
+ * Similar to get config request but supports getting data
+ * from operational ds aka backend clients directly.
+ */
+extern enum mgmt_result
+mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
+
+/*
+ * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * register_req
+ * TRUE if registering, FALSE otherwise.
+ *
+ * data_req
+ * Details of the YANG notification data.
+ *
+ * num_reqs
+ * Number of data requests.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool register_req,
+ Mgmtd__YangDataXPath **data_req, int num_reqs);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_FE_CLIENT_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * March 6 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+#include <zebra.h>
+#include "network.h"
+#include "sockopt.h"
+#include "stream.h"
+#include "frrevent.h"
+#include "mgmt_msg.h"
+
+
+/* Log a debug message prefixed with the given tag; no-op when tag is NULL.
+ * Arguments are parenthesized so callers may pass arbitrary expressions. */
+#define MGMT_MSG_DBG(dbgtag, fmt, ...) \
+ do { \
+ if ((dbgtag)) \
+ zlog_debug("%s: %s: " fmt, (dbgtag), __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+/* Log an error prefixed with the message-state identity tag. */
+#define MGMT_MSG_ERR(ms, fmt, ...) \
+ zlog_err("%s: %s: " fmt, (ms)->idtag, __func__, ##__VA_ARGS__)
+
+/**
+ * Read data from a socket into streams containing 1 or more full msgs headed by
+ * mgmt_msg_hdr which contain API messages (currently protobuf).
+ *
+ * Args:
+ * ms: mgmt_msg_state for this process.
+ * fd: socket/file to read data from.
+ * debug: true to enable debug logging.
+ *
+ * Returns:
+ * MSR_DISCONNECT - socket should be closed and connect retried.
+ * MSR_SCHED_STREAM - this call should be rescheduled to run.
+ * MSR_SCHED_BOTH - this call and the procmsg buf should be scheduled
+ * to run.
+ */
+enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
+ bool debug)
+{
+ const char *dbgtag = debug ? ms->idtag : NULL;
+ size_t avail = STREAM_WRITEABLE(ms->ins);
+ struct mgmt_msg_hdr *mhdr = NULL;
+ size_t total = 0;
+ size_t mcount = 0;
+ ssize_t n, left;
+
+ assert(ms && fd != -1);
+
+ /*
+ * Read as much as we can into the stream.
+ */
+ while (avail > sizeof(struct mgmt_msg_hdr)) {
+ n = stream_read_try(ms->ins, fd, avail);
+ MGMT_MSG_DBG(dbgtag, "got %zd bytes", n);
+
+ /* -2 is normal nothing read, and to retry */
+ if (n == -2)
+ break;
+ if (n <= 0) {
+ if (n == 0)
+ MGMT_MSG_ERR(ms, "got EOF/disconnect");
+ else
+ MGMT_MSG_ERR(ms,
+ "got error while reading: '%s'",
+ safe_strerror(errno));
+ return MSR_DISCONNECT;
+ }
+ ms->nrxb += n;
+ avail -= n;
+ }
+
+ /*
+ * Walk the buffered bytes counting how many complete messages
+ * (valid marker, fully-received length) we have.
+ */
+ assert(stream_get_getp(ms->ins) == 0);
+ left = stream_get_endp(ms->ins);
+ while (left > (long)sizeof(struct mgmt_msg_hdr)) {
+ mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
+ if (mhdr->marker != MGMT_MSG_MARKER) {
+ MGMT_MSG_DBG(dbgtag, "recv corrupt buffer, disconnect");
+ return MSR_DISCONNECT;
+ }
+ if ((ssize_t)mhdr->len > left)
+ break;
+
+ MGMT_MSG_DBG(dbgtag, "read full message len %u", mhdr->len);
+ total += mhdr->len;
+ left -= mhdr->len;
+ mcount++;
+ }
+
+ if (!mcount)
+ return MSR_SCHED_STREAM;
+
+ /*
+ * We have read at least one message into the stream, queue it up.
+ */
+ mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
+ stream_set_endp(ms->ins, total)
;
+ stream_fifo_push(&ms->inq, ms->ins);
+ ms->ins = stream_new(ms->max_msg_sz);
+ /* Copy any trailing partial message into the fresh input stream. */
+ if (left) {
+ stream_put(ms->ins, mhdr, left);
+ stream_set_endp(ms->ins, left);
+ }
+
+ return MSR_SCHED_BOTH;
+}
+
+/**
+ * Process streams containing whole messages that have been pushed onto the
+ * FIFO. This should be called from an event/timer handler and should be
+ * reschedulable.
+ *
+ * Args:
+ * ms: mgmt_msg_state for this process.
+ * handle_msg: function to call for each received message.
+ * user: opaque value passed through to handle_msg.
+ * debug: true to enable debug logging.
+ *
+ * Returns:
+ * true if more to process (so reschedule) else false
+ */
+bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
+ void (*handle_msg)(void *user, uint8_t *msg,
+ size_t msglen),
+ void *user, bool debug)
+{
+ const char *dbgtag = debug ? ms->idtag : NULL;
+ struct mgmt_msg_hdr *mhdr;
+ struct stream *work;
+ uint8_t *data;
+ size_t left, nproc;
+
+ MGMT_MSG_DBG(dbgtag, "Have %zu streams to process", ms->inq.count);
+
+ /* nproc counts messages handled, bounded by max_read_buf per call. */
+ nproc = 0;
+ while (nproc < ms->max_read_buf) {
+ work = stream_fifo_pop(&ms->inq);
+ if (!work)
+ break;
+
+ data = STREAM_DATA(work);
+ left = stream_get_endp(work);
+ MGMT_MSG_DBG(dbgtag, "Processing stream of len %zu", left);
+
+ for (; left > sizeof(struct mgmt_msg_hdr);
+ left -= mhdr->len, data += mhdr->len) {
+ mhdr = (struct mgmt_msg_hdr *)data;
+
+ assert(mhdr->marker == MGMT_MSG_MARKER);
+ assert(left >= mhdr->len);
+
+ /* Deliver the payload with the header stripped. */
+ handle_msg(user, (uint8_t *)(mhdr + 1),
+ mhdr->len - sizeof(struct mgmt_msg_hdr));
+ ms->nrxm++;
+ nproc++;
+ }
+
+ if (work != ms->ins)
+ stream_free(work); /* Free it up */
+ else
+ stream_reset(work); /* Reset stream for next read */
+ }
+
+ /* return true if should reschedule b/c more to process. */
+ return stream_fifo_head(&ms->inq) != NULL;
+}
+
+/**
+ * Write queued data onto the socket, using streams that have been queued for
+ * sending by mgmt_msg_send_msg. This function should be reschedulable.
+ *
+ * Args:
+ * ms: mgmt_msg_state for this process.
+ * fd: socket/file to write data to.
+ * debug: true to enable debug logging.
+ *
+ * Returns:
+ * MSW_SCHED_NONE - do not reschedule anything.
+ * MSW_SCHED_STREAM - this call should be rescheduled to run again.
+ * MSW_SCHED_WRITES_OFF - writes should be disabled with a timer to
+ * re-enable them a short time later
+ * MSW_DISCONNECT - socket should be closed and reconnect retried.
+ */
+enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
+ bool debug)
+{
+ const char *dbgtag = debug ? ms->idtag : NULL;
+ struct stream *s;
+ size_t nproc = 0;
+ ssize_t left;
+ ssize_t n;
+
+ /* Queue the current in-progress output stream first, if any. */
+ if (ms->outs) {
+ MGMT_MSG_DBG(dbgtag,
+ "found unqueued stream with %zu bytes, queueing",
+ stream_get_endp(ms->outs));
+ stream_fifo_push(&ms->outq, ms->outs);
+ ms->outs = NULL;
+ }
+
+ for (s = stream_fifo_head(&ms->outq); s && nproc < ms->max_write_buf;
+ s = stream_fifo_head(&ms->outq)) {
+ left = STREAM_READABLE(s);
+ assert(left);
+
+ n = stream_flush(s, fd);
+ if (n <= 0) {
+ if (n == 0)
+ MGMT_MSG_ERR(ms,
+ "connection closed while writing");
+ else if (ERRNO_IO_RETRY(errno)) {
+ MGMT_MSG_DBG(
+ dbgtag,
+ "retry error while writing %zd bytes: %s (%d)",
+ left, safe_strerror(errno), errno);
+ return MSW_SCHED_STREAM;
+ } else
+ MGMT_MSG_ERR(
+ ms,
+ "error while writing %zd bytes: %s (%d)",
+ left, safe_strerror(errno), errno);
+
+ /* Fatal write error: drop everything queued. */
+ n = mgmt_msg_reset_writes(ms);
+ MGMT_MSG_DBG(dbgtag, "drop and freed %zd streams", n);
+
+ return MSW_DISCONNECT;
+ }
+
+ ms->ntxb += n;
+ if (n != left) {
+ MGMT_MSG_DBG(dbgtag, "short stream write %zd of %zd", n,
+ left);
+ stream_forward_getp(s, n);
+ return MSW_SCHED_STREAM;
+ }
+
+ stream_free(stream_fifo_pop(&ms->outq));
+ MGMT_MSG_DBG(dbgtag, "wrote stream of %zd bytes", n);
+ nproc++;
+ }
+ if (s) {
+ MGMT_MSG_DBG(
+ dbgtag,
+ "reached %zu buffer writes, pausing with %zu streams left",
+ ms->max_write_buf, ms->outq.count);
+ return MSW_SCHED_WRITES_OFF;
+ }
+ MGMT_MSG_DBG(dbgtag, "flushed all streams from output q");
+ return MSW_SCHED_NONE;
+}
+
+
+/**
+ * Send a message by enqueueing it to be written over the socket by
+ * mgmt_msg_write.
+ *
+ * Args:
+ * ms: mgmt_msg_state for this process.
+ * msg: the message to be packed and enqueued.
+ * len: the packed (payload) length of the message.
+ * packf: callback used to pack the message into the output stream.
+ * debug: true to enable debug logging.
+ *
+ * Returns:
+ * 0 on success, otherwise -1 on failure. The only failure mode is if
+ * the message exceeds the maximum message size configured on init.
+ */
+int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
+ mgmt_msg_packf packf, bool debug)
+{
+ const char *dbgtag = debug ? ms->idtag : NULL;
+ struct mgmt_msg_hdr *mhdr;
+ struct stream *s;
+ uint8_t *dstbuf;
+ size_t endp, n;
+ size_t mlen = len + sizeof(*mhdr);
+
+ if (mlen > ms->max_msg_sz) {
+ MGMT_MSG_ERR(ms, "Message %zu > max size %zu, dropping", mlen,
+ ms->max_msg_sz);
+ return -1;
+ }
+
+ if (!ms->outs) {
+ MGMT_MSG_DBG(dbgtag, "creating new stream for msg len %zu",
+ len);
+ ms->outs = stream_new(ms->max_msg_sz);
+ } else if (STREAM_WRITEABLE(ms->outs) < mlen) {
+ MGMT_MSG_DBG(
+ dbgtag,
+ "enq existing stream len %zu and creating new stream for msg len %zu",
+ STREAM_WRITEABLE(ms->outs), mlen);
+ stream_fifo_push(&ms->outq, ms->outs);
+ ms->outs = stream_new(ms->max_msg_sz);
+ } else {
+ MGMT_MSG_DBG(
+ dbgtag,
+ "using existing stream with avail %zu for msg len %zu",
+ STREAM_WRITEABLE(ms->outs), mlen);
+ }
+ s = ms->outs;
+
+ /* We have a stream with space, pack the message into it. */
+ mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(s) + s->endp);
+ mhdr->marker = MGMT_MSG_MARKER;
+ /* NOTE(review): mhdr->len is set from 'len' but the endp advances by
+ * packf's actual return 'n' — assumes packf packs exactly 'len'
+ * bytes; confirm with callers. */
+ mhdr->len = mlen;
+ stream_forward_endp(s, sizeof(*mhdr));
+ endp = stream_get_endp(s);
+ dstbuf = STREAM_DATA(s) + endp;
+ n = packf(msg, dstbuf);
+ stream_set_endp(s, endp + n);
+ ms->ntxm++;
+
+ return 0;
+}
+
+/**
+ * Create and open a unix domain stream socket on the given path
+ * setting non-blocking and send and receive buffer sizes.
+ *
+ * The caller owns the returned descriptor and is responsible for closing it.
+ *
+ * Args:
+ * path: path of unix domain socket to connect to.
+ * sendbuf: size of socket send buffer.
+ * recvbuf: size of socket receive buffer.
+ * dbgtag: if non-NULL enable log debug, and use this tag.
+ *
+ * Returns:
+ * socket fd or -1 on error.
+ */
+int mgmt_msg_connect(const char *path, size_t sendbuf, size_t recvbuf,
+ const char *dbgtag)
+{
+ int ret, sock, len;
+ struct sockaddr_un addr;
+
+ MGMT_MSG_DBG(dbgtag, "connecting to server on %s", path);
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock < 0) {
+ MGMT_MSG_DBG(dbgtag, "socket failed: %s", safe_strerror(errno));
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(struct sockaddr_un));
+ addr.sun_family = AF_UNIX;
+ strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
+/* BSD-derived systems carry an explicit length field in sockaddr_un. */
+#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
+ len = addr.sun_len = SUN_LEN(&addr);
+#else
+ len = sizeof(addr.sun_family) + strlen(addr.sun_path);
+#endif /* HAVE_STRUCT_SOCKADDR_UN_SUN_LEN */
+ ret = connect(sock, (struct sockaddr *)&addr, len);
+ if (ret < 0) {
+ MGMT_MSG_DBG(dbgtag, "failed to connect on %s: %s", path,
+ safe_strerror(errno));
+ close(sock);
+ return -1;
+ }
+
+ MGMT_MSG_DBG(dbgtag, "connected to server on %s", path);
+ set_nonblocking(sock);
+ setsockopt_so_sendbuf(sock, sendbuf);
+ setsockopt_so_recvbuf(sock, recvbuf);
+ return sock;
+}
+
+/**
+ * Reset the sending queue, by dequeueing all streams and freeing them. Return
+ * the number of streams freed.
+ *
+ * Args:
+ * ms: mgmt_msg_state for this process.
+ *
+ * Returns:
+ * Number of streams that were freed.
+ */
+size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms)
+{
+ struct stream *s;
+ size_t nproc = 0;
+
+ for (s = stream_fifo_pop(&ms->outq); s;
+ s = stream_fifo_pop(&ms->outq), nproc++)
+ stream_free(s);
+
+ return nproc;
+}
+
+/**
+ * Initialize the message state: allocate the input stream, init the in/out
+ * FIFOs, record the read/write batching limits and max message size, and
+ * copy the identity tag used to prefix log messages.
+ */
+void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
+ size_t max_write_buf, size_t max_msg_sz, const char *idtag)
+{
+ memset(ms, 0, sizeof(*ms));
+ ms->ins = stream_new(max_msg_sz);
+ stream_fifo_init(&ms->inq);
+ stream_fifo_init(&ms->outq);
+ /* Fix: these two assignments were previously swapped. */
+ ms->max_read_buf = max_read_buf;
+ ms->max_write_buf = max_write_buf;
+ ms->max_msg_sz = max_msg_sz;
+ ms->idtag = strdup(idtag);
+}
+
+/**
+ * Free all message-state resources: queued output streams, any input streams
+ * still awaiting processing on the inq FIFO (previously leaked), the current
+ * input stream, and the identity tag. The struct itself is caller-owned.
+ */
+void mgmt_msg_destroy(struct mgmt_msg_state *ms)
+{
+ struct stream *s;
+
+ mgmt_msg_reset_writes(ms);
+ /* Fix: drain fully-read but not-yet-processed input streams too. */
+ while ((s = stream_fifo_pop(&ms->inq)) != NULL)
+ stream_free(s);
+ if (ms->ins)
+ stream_free(ms->ins);
+ free(ms->idtag);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * March 6 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+#ifndef _MGMT_MSG_H
+#define _MGMT_MSG_H
+
+#include "stream.h"
+#include "frrevent.h"
+
+#define MGMT_MSG_MARKER (0x4D724B21u) /* ASCII - "MrK!"*/
+
+/* Per-connection message stream state. */
+struct mgmt_msg_state {
+ struct stream *ins; /* current input stream being read into */
+ struct stream *outs; /* current output stream being packed into */
+ struct stream_fifo inq; /* full streams awaiting processing */
+ struct stream_fifo outq; /* full streams awaiting writing */
+ uint64_t nrxm; /* number of received messages */
+ uint64_t nrxb; /* number of received bytes */
+ uint64_t ntxm; /* number of sent messages */
+ uint64_t ntxb; /* number of sent bytes */
+ size_t max_read_buf; /* should replace with max time value */
+ size_t max_write_buf; /* should replace with max time value */
+ size_t max_msg_sz; /* max total size of a single message */
+ char *idtag; /* identifying tag for messages */
+};
+
+/* Wire header preceding every message; len includes the header itself. */
+struct mgmt_msg_hdr {
+ uint32_t marker;
+ uint32_t len;
+};
+
+/* What mgmt_msg_read() asks the caller to schedule next. */
+enum mgmt_msg_rsched {
+ MSR_SCHED_BOTH, /* schedule both queue and read */
+ MSR_SCHED_STREAM, /* schedule read */
+ MSR_DISCONNECT, /* disconnect and start reconnecting */
+};
+
+/* What mgmt_msg_write() asks the caller to schedule next. */
+enum mgmt_msg_wsched {
+ MSW_SCHED_NONE, /* no scheduling required */
+ MSW_SCHED_STREAM, /* schedule writing */
+ MSW_SCHED_WRITES_OFF, /* toggle writes off */
+ MSW_DISCONNECT, /* disconnect and start reconnecting */
+};
+
+/* Return a pointer to the payload that immediately follows the header. */
+static inline uint8_t *msg_payload(struct mgmt_msg_hdr *mhdr)
+{
+ return (uint8_t *)(mhdr + 1);
+}
+
+/* Callback that packs a message into a buffer; returns bytes written. */
+typedef size_t (*mgmt_msg_packf)(void *msg, void *data);
+
+extern int mgmt_msg_connect(const char *path, size_t sendbuf, size_t recvbuf,
+ const char *dbgtag);
+extern void mgmt_msg_destroy(struct mgmt_msg_state *ms);
+extern void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
+ size_t max_write_buf, size_t max_msg_sz,
+ const char *idtag);
+extern bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
+ void (*handle_msg)(void *user, uint8_t *msg,
+ size_t msglen),
+ void *user, bool debug);
+extern enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
+ bool debug);
+extern size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms);
+extern int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
+ size_t (*packf)(void *msg, void *buf), bool debug);
+extern enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
+ bool debug);
+
+#endif /* _MGMT_MSG_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD protobuf main header file
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_PB_H_
+#define _FRR_MGMTD_PB_H_
+
+#include "lib/mgmt.pb-c.h"
+
+/*
+ * Convenience wrappers shortening the protobuf-c generated
+ * mgmtd__*__init() message-initializer names.
+ */
+#define mgmt_yang_data_xpath_init(ptr) mgmtd__yang_data_xpath__init(ptr)
+
+#define mgmt_yang_data_value_init(ptr) mgmtd__yang_data_value__init(ptr)
+
+#define mgmt_yang_data_init(ptr) mgmtd__yang_data__init(ptr)
+
+#define mgmt_yang_data_reply_init(ptr) mgmtd__yang_data_reply__init(ptr)
+
+#define mgmt_yang_cfg_data_req_init(ptr) mgmtd__yang_cfg_data_req__init(ptr)
+
+#define mgmt_yang_get_data_req_init(ptr) mgmtd__yang_get_data_req__init(ptr)
+
+#endif /* _FRR_MGMTD_PB_H_ */
{
struct nb_node *nb_node;
struct lysc_node *sparent, *sparent_list;
+ struct frr_yang_module_info *module;
+ module = (struct frr_yang_module_info *)arg;
nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
sizeof(nb_node->xpath));
assert(snode->priv == NULL);
((struct lysc_node *)snode)->priv = nb_node;
+ if (module && module->ignore_cbs)
+ SET_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS);
+
return YANG_ITER_CONTINUE;
}
{
unsigned int error = 0;
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return error;
+
error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
!!nb_node->cbs.create, false);
error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
config->dnode = yang_dnode_new(ly_native_ctx, true);
config->version = 0;
+ RB_INIT(nb_config_cbs, &config->cfg_chgs);
+
return config;
}
{
if (config->dnode)
yang_dnode_free(config->dnode);
+ nb_config_diff_del_changes(&config->cfg_chgs);
XFREE(MTYPE_NB_CONFIG, config);
}
dup->dnode = yang_dnode_dup(config->dnode);
dup->version = config->version;
+ RB_INIT(nb_config_cbs, &dup->cfg_chgs);
+
return dup;
}
RB_INSERT(nb_config_cbs, changes, &change->cb);
}
-static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
+void nb_config_diff_del_changes(struct nb_config_cbs *changes)
{
while (!RB_EMPTY(nb_config_cbs, changes)) {
struct nb_config_change *change;
* configurations. Given a new subtree, calculate all new YANG data nodes,
* excluding default leafs and leaf-lists. This is a recursive function.
*/
-static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
- struct nb_config_cbs *changes)
+void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+ struct nb_config_cbs *changes)
{
enum nb_operation operation;
struct lyd_node *child;
}
#endif
-/* Calculate the delta between two different configurations. */
-static void nb_config_diff(const struct nb_config *config1,
- const struct nb_config *config2,
- struct nb_config_cbs *changes)
+/*
+ * Calculate the delta between two different configurations.
+ *
+ * NOTE: 'config1' is the reference DB, while 'config2' is
+ * the DB being compared against 'config1'. Typically 'config1'
+ * should be the Running DB and 'config2' is the Candidate DB.
+ */
+void nb_config_diff(const struct nb_config *config1,
+ const struct nb_config *config2,
+ struct nb_config_cbs *changes)
{
struct lyd_node *diff = NULL;
const struct lyd_node *root, *dnode;
return NB_OK;
}
+/*
+ * Queue the northbound change callbacks implied by a single user edit onto
+ * candidate->cfg_chgs, by walking the edited subtree and mapping each
+ * libyang diff op ('c'reate / 'd'elete / 'r'eplace) to a change entry.
+ */
+static void nb_update_candidate_changes(struct nb_config *candidate,
+ struct nb_cfg_change *change,
+ uint32_t *seq)
+{
+ enum nb_operation oper = change->operation;
+ char *xpath = change->xpath;
+ struct lyd_node *root = NULL;
+ struct lyd_node *dnode;
+ struct nb_config_cbs *cfg_chgs = &candidate->cfg_chgs;
+ int op;
+
+ switch (oper) {
+ case NB_OP_CREATE:
+ case NB_OP_MODIFY:
+ root = yang_dnode_get(candidate->dnode, xpath);
+ break;
+ case NB_OP_DESTROY:
+ /* Destroyed nodes are gone from the candidate, so look
+ * them up in the running config instead. */
+ root = yang_dnode_get(running_config->dnode, xpath);
+ break;
+ case NB_OP_MOVE:
+ case NB_OP_PRE_VALIDATE:
+ case NB_OP_APPLY_FINISH:
+ case NB_OP_GET_ELEM:
+ case NB_OP_GET_NEXT:
+ case NB_OP_GET_KEYS:
+ case NB_OP_LOOKUP_ENTRY:
+ case NB_OP_RPC:
+ /* No subtree to record for these operations. */
+ break;
+ default:
+ assert(!"non-enum value, invalid");
+ }
+
+ if (!root)
+ return;
+
+ LYD_TREE_DFS_BEGIN (root, dnode) {
+ op = nb_lyd_diff_get_op(dnode);
+ switch (op) {
+ case 'c':
+ nb_config_diff_created(dnode, seq, cfg_chgs);
+ LYD_TREE_DFS_continue = 1;
+ break;
+ case 'd':
+ nb_config_diff_deleted(dnode, seq, cfg_chgs);
+ LYD_TREE_DFS_continue = 1;
+ break;
+ case 'r':
+ /* 'replace' is recorded as a MODIFY change. */
+ nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
+ dnode);
+ break;
+ default:
+ break;
+ }
+ LYD_TREE_DFS_END(root, dnode);
+ }
+}
+
+/*
+ * Reject operations that are never valid for a node: list keys may not be
+ * modified or destroyed directly (they only change via the list entry).
+ */
+static bool nb_is_operation_allowed(struct nb_node *nb_node,
+ struct nb_cfg_change *change)
+{
+ enum nb_operation oper = change->operation;
+
+ if (lysc_is_key(nb_node->snode)) {
+ if (oper == NB_OP_MODIFY || oper == NB_OP_DESTROY)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Apply an array of user config changes to a candidate configuration.
+ *
+ * Each change's XPath is resolved (handling '.'-relative paths against
+ * xpath_base / curr_xpath), the matching northbound node is located, and the
+ * edit is applied via nb_candidate_edit(); successful edits are recorded on
+ * the candidate via nb_update_candidate_changes(). On failure *error is set
+ * and a summary message is written into err_buf.
+ */
+void nb_candidate_edit_config_changes(
+ struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+ size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+ int xpath_index, char *err_buf, int err_bufsize, bool *error)
+{
+ uint32_t seq = 0;
+
+ if (error)
+ *error = false;
+
+ if (xpath_base == NULL)
+ xpath_base = "";
+
+ /* Edit candidate configuration. */
+ for (size_t i = 0; i < num_cfg_changes; i++) {
+ struct nb_cfg_change *change = &cfg_changes[i];
+ struct nb_node *nb_node;
+ char xpath[XPATH_MAXLEN];
+ struct yang_data *data;
+ int ret;
+
+ /* Handle relative XPaths. */
+ memset(xpath, 0, sizeof(xpath));
+ if (xpath_index > 0 &&
+ (xpath_base[0] == '.' || change->xpath[0] == '.'))
+ strlcpy(xpath, curr_xpath, sizeof(xpath));
+ if (xpath_base[0]) {
+ if (xpath_base[0] == '.')
+ strlcat(xpath, xpath_base + 1, sizeof(xpath));
+ else
+ strlcat(xpath, xpath_base, sizeof(xpath));
+ }
+ /* An absolute change xpath replaces anything built above. */
+ if (change->xpath[0] == '.')
+ strlcat(xpath, change->xpath + 1, sizeof(xpath));
+ else
+ strlcpy(xpath, change->xpath, sizeof(xpath));
+
+ /* Find the northbound node associated to the data path. */
+ nb_node = nb_node_find(xpath);
+ if (!nb_node) {
+ flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
+ "%s: unknown data path: %s", __func__, xpath);
+ if (error)
+ *error = true;
+ continue;
+ }
+ /* Find if the node to be edited is not a key node */
+ if (!nb_is_operation_allowed(nb_node, change)) {
+ zlog_err(" Xpath %s points to key node", xpath);
+ if (error)
+ *error = true;
+ break;
+ }
+
+ /* If the value is not set, get the default if it exists. */
+ if (change->value == NULL)
+ change->value = yang_snode_get_default(nb_node->snode);
+ data = yang_data_new(xpath, change->value);
+
+ /*
+ * Ignore "not found" errors when editing the candidate
+ * configuration.
+ */
+ ret = nb_candidate_edit(candidate_config, nb_node,
+ change->operation, xpath, NULL, data);
+ yang_data_free(data);
+ if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
+ flog_warn(
+ EC_LIB_NB_CANDIDATE_EDIT_ERROR,
+ "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
+ __func__, nb_operation_name(change->operation),
+ xpath);
+ if (error)
+ *error = true;
+ continue;
+ }
+ /* Record the resulting diff entries on the candidate. */
+ nb_update_candidate_changes(candidate_config, change, &seq);
+ }
+
+ if (error && *error) {
+ char buf[BUFSIZ];
+
+ /*
+ * Failure to edit the candidate configuration should never
+ * happen in practice, unless there's a bug in the code. When
+ * that happens, log the error but otherwise ignore it.
+ */
+ snprintf(err_buf, err_bufsize,
+ "%% Failed to edit configuration.\n\n%s",
+ yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+ }
+}
+
bool nb_candidate_needs_update(const struct nb_config *candidate)
{
if (candidate->version < running_config->version)
* WARNING: lyd_validate() can change the configuration as part of the
* validation process.
*/
-static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
- size_t errmsg_len)
+int nb_candidate_validate_yang(struct nb_config *candidate, bool no_state,
+ char *errmsg, size_t errmsg_len)
{
if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
- LYD_VALIDATE_NO_STATE, NULL)
- != 0) {
+ no_state ? LYD_VALIDATE_NO_STATE
+ : LYD_VALIDATE_PRESENT,
+ NULL) != 0) {
yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
return NB_ERR_VALIDATION;
}
}
/* Perform code-level validation using the northbound callbacks. */
-static int nb_candidate_validate_code(struct nb_context *context,
- struct nb_config *candidate,
- struct nb_config_cbs *changes,
- char *errmsg, size_t errmsg_len)
+int nb_candidate_validate_code(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes, char *errmsg,
+ size_t errmsg_len)
{
struct nb_config_cb *cb;
struct lyd_node *root, *child;
return NB_OK;
}
+/*
+ * YANG-validate the candidate and compute its diff against the running
+ * configuration into 'changes'. Returns NB_OK, or NB_ERR_VALIDATION with
+ * the error text written to errmsg.
+ */
+int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len)
+{
+ /*
+ * Fix: previously passed sizeof(errmsg_len) — the size of a size_t —
+ * as the error-buffer length instead of the caller-supplied length,
+ * truncating validation error messages to a few bytes.
+ */
+ if (nb_candidate_validate_yang(candidate, true, errmsg, errmsg_len) !=
+ NB_OK)
+ return NB_ERR_VALIDATION;
+
+ RB_INIT(nb_config_cbs, changes);
+ nb_config_diff(running_config, candidate, changes);
+
+ return NB_OK;
+}
+
int nb_candidate_validate(struct nb_context *context,
struct nb_config *candidate, char *errmsg,
size_t errmsg_len)
struct nb_config_cbs changes;
int ret;
- if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
- return NB_ERR_VALIDATION;
+ ret = nb_candidate_diff_and_validate_yang(context, candidate, &changes,
+ errmsg, errmsg_len);
+ if (ret != NB_OK)
+ return ret;
- RB_INIT(nb_config_cbs, &changes);
- nb_config_diff(running_config, candidate, &changes);
ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
errmsg_len);
nb_config_diff_del_changes(&changes);
struct nb_config *candidate,
const char *comment,
struct nb_transaction **transaction,
+ bool skip_validate, bool ignore_zero_change,
char *errmsg, size_t errmsg_len)
{
struct nb_config_cbs changes;
- if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
- != NB_OK) {
+ if (!skip_validate &&
+ nb_candidate_validate_yang(candidate, true, errmsg, errmsg_len) !=
+ NB_OK) {
flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
"%s: failed to validate candidate configuration",
__func__);
RB_INIT(nb_config_cbs, &changes);
nb_config_diff(running_config, candidate, &changes);
- if (RB_EMPTY(nb_config_cbs, &changes)) {
+ if (!ignore_zero_change && RB_EMPTY(nb_config_cbs, &changes)) {
snprintf(
errmsg, errmsg_len,
"No changes to apply were found during preparation phase");
return NB_ERR_NO_CHANGES;
}
- if (nb_candidate_validate_code(&context, candidate, &changes, errmsg,
+ if (!skip_validate &&
+ nb_candidate_validate_code(&context, candidate, &changes, errmsg,
errmsg_len) != NB_OK) {
flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
"%s: failed to validate candidate configuration",
return NB_ERR_VALIDATION;
}
- *transaction = nb_transaction_new(context, candidate, &changes, comment,
- errmsg, errmsg_len);
+ /*
+ * Re-use an existing transaction if provided. Else allocate a new one.
+ */
+ if (!*transaction)
+ *transaction = nb_transaction_new(context, candidate, &changes,
+ comment, errmsg, errmsg_len);
if (*transaction == NULL) {
flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
"%s: failed to create transaction: %s", __func__,
int ret;
ret = nb_candidate_commit_prepare(context, candidate, comment,
- &transaction, errmsg, errmsg_len);
+ &transaction, false, false, errmsg,
+ errmsg_len);
/*
* Apply the changes if the preparation phase succeeded. Otherwise abort
* the transaction.
bool unexpected_error = false;
int ret;
+ assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
nb_log_config_callback(event, NB_OP_CREATE, dnode);
args.context = context;
bool unexpected_error = false;
int ret;
+ assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
nb_log_config_callback(event, NB_OP_MODIFY, dnode);
args.context = context;
bool unexpected_error = false;
int ret;
+ assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
nb_log_config_callback(event, NB_OP_DESTROY, dnode);
args.context = context;
bool unexpected_error = false;
int ret;
+ assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
nb_log_config_callback(event, NB_OP_MOVE, dnode);
args.context = context;
bool unexpected_error = false;
int ret;
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return 0;
+
nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);
args.dnode = dnode;
{
struct nb_cb_apply_finish_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return;
+
nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);
args.context = context;
{
struct nb_cb_get_elem_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NULL;
+
DEBUGD(&nb_dbg_cbs_state,
"northbound callback (get_elem): xpath [%s] list_entry [%p]",
xpath, list_entry);
{
struct nb_cb_get_next_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NULL;
+
DEBUGD(&nb_dbg_cbs_state,
"northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
nb_node->xpath, parent_list_entry, list_entry);
{
struct nb_cb_get_keys_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return 0;
+
DEBUGD(&nb_dbg_cbs_state,
"northbound callback (get_keys): node [%s] list_entry [%p]",
nb_node->xpath, list_entry);
{
struct nb_cb_lookup_entry_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NULL;
+
DEBUGD(&nb_dbg_cbs_state,
"northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
nb_node->xpath, parent_list_entry);
{
struct nb_cb_rpc_args args = {};
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return 0;
+
DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
args.xpath = xpath;
union nb_resource *resource;
int ret = NB_ERR;
+ if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+ return NB_OK;
+
if (event == NB_EV_VALIDATE)
resource = NULL;
else
/* Iterate over all list entries. */
do {
const struct lysc_node_leaf *skey;
- struct yang_list_keys list_keys;
+ struct yang_list_keys list_keys = {};
char xpath[XPATH_MAXLEN * 2];
int ret;
return "gRPC";
case NB_CLIENT_PCEP:
return "Pcep";
+ case NB_CLIENT_MGMTD_SERVER:
+ return "MGMTD Server";
+ case NB_CLIENT_MGMTD_BE:
+ return "MGMT Backend";
case NB_CLIENT_NONE:
return "None";
}
static void nb_load_callbacks(const struct frr_yang_module_info *module)
{
+
+ if (module->ignore_cbs)
+ return;
+
for (size_t i = 0; module->nodes[i].xpath; i++) {
struct nb_node *nb_node;
uint32_t priority;
}
-void nb_init(struct thread_master *tm,
+void nb_init(struct event_loop *tm,
const struct frr_yang_module_info *const modules[],
size_t nmodules, bool db_enabled)
{
/* Initialize the compiled nodes with northbound data */
for (size_t i = 0; i < nmodules; i++) {
- yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0, NULL);
+ yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0,
+ (void *)modules[i]);
nb_load_callbacks(modules[i]);
}
nb_nodes_delete();
/* Delete the running configuration. */
- hash_clean(running_config_entries, running_config_entry_free);
- hash_free(running_config_entries);
+ hash_clean_and_free(&running_config_entries, running_config_entry_free);
nb_config_free(running_config);
pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}
#ifndef _FRR_NORTHBOUND_H_
#define _FRR_NORTHBOUND_H_
-#include "thread.h"
+#include "frrevent.h"
#include "hook.h"
#include "linklist.h"
#include "openbsd-tree.h"
struct vty;
struct debug;
+/* Namespace + identifier pair naming one element of a parsed xpath. */
+struct nb_yang_xpath_tag {
+	uint32_t ns;	/* YANG module namespace identifier */
+	uint32_t id;	/* node identifier within that namespace */
+};
+
+/* A YANG value together with its libyang type information. */
+struct nb_yang_value {
+	struct lyd_value value;
+	LY_DATA_TYPE value_type;	/* libyang data type of 'value' */
+	uint8_t value_flags;
+};
+
+/* One xpath element: its tag plus (for list nodes) a key value. */
+struct nb_yang_xpath_elem {
+	struct nb_yang_xpath_tag tag;
+	struct nb_yang_value val;
+};
+
+/* Upper bounds on keys per list node and tags per xpath. */
+#define NB_MAX_NUM_KEYS UINT8_MAX
+#define NB_MAX_NUM_XPATH_TAGS UINT8_MAX
+
+/*
+ * Parsed xpath: an ordered sequence of tags, each optionally carrying the
+ * key values of the list node it addresses.
+ */
+struct nb_yang_xpath {
+	uint8_t length;	/* number of valid entries in tags[] */
+	struct {
+		uint8_t num_keys;	/* number of valid entries in keys[] */
+		struct nb_yang_xpath_elem keys[NB_MAX_NUM_KEYS];
+	} tags[NB_MAX_NUM_XPATH_TAGS];
+};
+
+/*
+ * Return a pointer to key __indx2 of tag __indx1 within __xpath, or NULL
+ * when either index is out of range.
+ *
+ * Note: the tag count member of struct nb_yang_xpath is 'length' (the
+ * previous 'num_tags' reference did not exist and would not compile).
+ * All macro arguments are parenthesized to stay safe with expression
+ * arguments.
+ */
+#define NB_YANG_XPATH_KEY(__xpath, __indx1, __indx2)                           \
+	(((__xpath)->length > (__indx1))                                       \
+			 && ((__xpath)->tags[(__indx1)].num_keys > (__indx2))  \
+		 ? &(__xpath)->tags[(__indx1)].keys[(__indx2)]                 \
+		 : NULL)
+
/* Northbound events. */
enum nb_event {
/*
NB_OP_RPC,
};
+/* A single configuration change requested through the northbound layer. */
+struct nb_cfg_change {
+	char xpath[XPATH_MAXLEN];	/* data path of the node being changed */
+	enum nb_operation operation;	/* create/modify/destroy/move/... */
+	/* New value; NULL means the node's YANG default is used (see the
+	 * candidate-edit code path). */
+	const char *value;
+};
+
union nb_resource {
int fd;
void *ptr;
#define F_NB_NODE_CONFIG_ONLY 0x01
/* The YANG list doesn't contain key leafs. */
#define F_NB_NODE_KEYLESS_LIST 0x02
+/* Ignore callbacks for this node */
+#define F_NB_NODE_IGNORE_CBS 0x04
/*
* HACK: old gcc versions (< 5.x) have a bug that prevents C99 flexible arrays
/* YANG module name. */
const char *name;
+ /*
+ * Ignore callbacks for this module. Set this to true to
+ * load module without any callbacks.
+ */
+ bool ignore_cbs;
+
/* Northbound callbacks. */
const struct {
/* Data path of this YANG node. */
NB_CLIENT_SYSREPO,
NB_CLIENT_GRPC,
NB_CLIENT_PCEP,
+ NB_CLIENT_MGMTD_SERVER,
+ NB_CLIENT_MGMTD_BE,
};
/* Northbound context. */
const void *user;
};
-/* Northbound configuration. */
-struct nb_config {
- struct lyd_node *dnode;
- uint32_t version;
-};
-
/* Northbound configuration callback. */
struct nb_config_cb {
RB_ENTRY(nb_config_cb) entry;
struct nb_config_cbs changes;
};
+/* Northbound configuration. */
+struct nb_config {
+	struct lyd_node *dnode;	/* libyang data tree holding the config */
+	uint32_t version;	/* configuration version number */
+	/* Recorded configuration changes for this config (struct
+	 * nb_config_cbs container). */
+	struct nb_config_cbs cfg_chgs;
+};
+
/* Callback function used by nb_oper_data_iterate(). */
typedef int (*nb_oper_data_cb)(const struct lysc_node *snode,
struct yang_translator *translator,
const struct yang_data *previous,
const struct yang_data *data);
+/*
+ * Create diff for configuration.
+ *
+ * dnode
+ *    Pointer to the created libyang data node for which "create" change
+ *    entries will be generated into 'changes'.
+ *
+ * seq
+ * Returns sequence number assigned to the specific change.
+ *
+ * changes
+ * Northbound config callback head.
+ */
+extern void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+ struct nb_config_cbs *changes);
+
/*
* Check if a candidate configuration is outdated and needs to be updated.
*
*/
extern bool nb_candidate_needs_update(const struct nb_config *candidate);
+/*
+ * Edit candidate configuration changes.
+ *
+ * candidate_config
+ * Candidate configuration to edit.
+ *
+ * cfg_changes
+ * Northbound config changes.
+ *
+ * num_cfg_changes
+ * Number of config changes.
+ *
+ * xpath_base
+ * Base xpath for config.
+ *
+ * curr_xpath
+ * Current xpath for config.
+ *
+ * xpath_index
+ * Index of xpath being processed.
+ *
+ * err_buf
+ * Buffer to store human-readable error message in case of error.
+ *
+ * err_bufsize
+ * Size of err_buf.
+ *
+ * error
+ *    Output parameter: set to TRUE on error, FALSE on success.
+ */
+extern void nb_candidate_edit_config_changes(
+ struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+ size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+ int xpath_index, char *err_buf, int err_bufsize, bool *error);
+
+/*
+ * Delete candidate configuration changes.
+ *
+ * changes
+ * Northbound config changes.
+ */
+extern void nb_config_diff_del_changes(struct nb_config_cbs *changes);
+
+/*
+ * Create candidate diff and validate on yang tree
+ *
+ * context
+ * Context of the northbound transaction.
+ *
+ * candidate
+ * Candidate DB configuration.
+ *
+ * changes
+ * Northbound config changes.
+ *
+ * errmsg
+ * Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ * Size of errmsg.
+ *
+ * Returns:
+ * NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len);
+
+/*
+ * Calculate the delta between two different configurations.
+ *
+ * reference
+ * Running DB config changes to be compared against.
+ *
+ * incremental
+ * Candidate DB config changes that will be compared against reference.
+ *
+ * changes
+ * Will hold the final diff generated.
+ *
+ */
+extern void nb_config_diff(const struct nb_config *reference,
+ const struct nb_config *incremental,
+ struct nb_config_cbs *changes);
+
+/*
+ * Perform YANG syntactic and semantic validation.
+ *
+ * WARNING: lyd_validate() can change the configuration as part of the
+ * validation process.
+ *
+ * candidate
+ *    Candidate DB configuration.
+ *
+ * no_state
+ *    Presumably, when TRUE no state (operational) data is expected in the
+ *    validated tree — confirm against the implementation.
+ *
+ * errmsg
+ * Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ * Size of errmsg.
+ *
+ * Returns:
+ * NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_validate_yang(struct nb_config *candidate,
+ bool no_state, char *errmsg,
+ size_t errmsg_len);
+
+/*
+ * Perform code-level validation using the northbound callbacks.
+ *
+ * context
+ * Context of the northbound transaction.
+ *
+ * candidate
+ * Candidate DB configuration.
+ *
+ * changes
+ * Northbound config changes.
+ *
+ * errmsg
+ * Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ * Size of errmsg.
+ *
+ * Returns:
+ * NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_validate_code(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len);
+
/*
* Update a candidate configuration by rebasing the changes on top of the latest
* running configuration. Resolve conflicts automatically by giving preference
* nb_candidate_commit_abort() or committed using
* nb_candidate_commit_apply().
*
+ * skip_validate
+ * TRUE to skip commit validation, FALSE otherwise.
+ *
+ * ignore_zero_change
+ *    TRUE to ignore the case where there are zero configuration changes,
+ *    FALSE otherwise.
+ *
* errmsg
* Buffer to store human-readable error message in case of error.
*
struct nb_config *candidate,
const char *comment,
struct nb_transaction **transaction,
- char *errmsg, size_t errmsg_len);
+ bool skip_validate,
+ bool ignore_zero_change, char *errmsg,
+ size_t errmsg_len);
/*
* Abort a previously created configuration transaction, releasing all resources
* db_enabled
* Set this to record the transactions in the transaction log.
*/
-extern void nb_init(struct thread_master *tm,
+extern void nb_init(struct event_loop *tm,
const struct frr_yang_module_info *const modules[],
size_t nmodules, bool db_enabled);
struct debug nb_dbg_libyang = {0, "libyang debugging"};
struct nb_config *vty_shared_candidate_config;
-static struct thread_master *master;
+static struct event_loop *master;
static void vty_show_nb_errors(struct vty *vty, int error, const char *errmsg)
{
void nb_cli_enqueue_change(struct vty *vty, const char *xpath,
enum nb_operation operation, const char *value)
{
- struct vty_cfg_change *change;
+ struct nb_cfg_change *change;
if (vty->num_cfg_changes == VTY_MAXCFGCHANGES) {
/* Not expected to happen. */
bool clear_pending)
{
bool error = false;
-
- if (xpath_base == NULL)
- xpath_base = "";
+ char buf[BUFSIZ];
VTY_CHECK_XPATH;
- /* Edit candidate configuration. */
- for (size_t i = 0; i < vty->num_cfg_changes; i++) {
- struct vty_cfg_change *change = &vty->cfg_changes[i];
- struct nb_node *nb_node;
- char xpath[XPATH_MAXLEN];
- struct yang_data *data;
- int ret;
-
- /* Handle relative XPaths. */
- memset(xpath, 0, sizeof(xpath));
- if (vty->xpath_index > 0
- && (xpath_base[0] == '.' || change->xpath[0] == '.'))
- strlcpy(xpath, VTY_CURR_XPATH, sizeof(xpath));
- if (xpath_base[0]) {
- if (xpath_base[0] == '.')
- strlcat(xpath, xpath_base + 1, sizeof(xpath));
- else
- strlcat(xpath, xpath_base, sizeof(xpath));
- }
- if (change->xpath[0] == '.')
- strlcat(xpath, change->xpath + 1, sizeof(xpath));
- else
- strlcpy(xpath, change->xpath, sizeof(xpath));
-
- /* Find the northbound node associated to the data path. */
- nb_node = nb_node_find(xpath);
- if (!nb_node) {
- flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
- "%s: unknown data path: %s", __func__, xpath);
- error = true;
- continue;
- }
-
- /* If the value is not set, get the default if it exists. */
- if (change->value == NULL)
- change->value = yang_snode_get_default(nb_node->snode);
- data = yang_data_new(xpath, change->value);
-
- /*
- * Ignore "not found" errors when editing the candidate
- * configuration.
- */
- ret = nb_candidate_edit(vty->candidate_config, nb_node,
- change->operation, xpath, NULL, data);
- yang_data_free(data);
- if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
- flog_warn(
- EC_LIB_NB_CANDIDATE_EDIT_ERROR,
- "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
- __func__, nb_operation_name(change->operation),
- xpath);
- error = true;
- continue;
- }
- }
-
+ nb_candidate_edit_config_changes(
+ vty->candidate_config, vty->cfg_changes, vty->num_cfg_changes,
+ xpath_base, VTY_CURR_XPATH, vty->xpath_index, buf, sizeof(buf),
+ &error);
if (error) {
- char buf[BUFSIZ];
-
/*
* Failure to edit the candidate configuration should never
* happen in practice, unless there's a bug in the code. When
* that happens, log the error but otherwise ignore it.
*/
- vty_out(vty, "%% Failed to edit configuration.\n\n");
- vty_out(vty, "%s",
- yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+ vty_out(vty, "%s", buf);
}
/*
int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
{
char xpath_base[XPATH_MAXLEN] = {};
+ bool implicit_commit;
+ int ret;
/* Parse the base XPath format string. */
if (xpath_base_fmt) {
vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
va_end(ap);
}
+
+ if (vty_mgmt_fe_enabled()) {
+ VTY_CHECK_XPATH;
+
+ implicit_commit = vty_needs_implicit_commit(vty);
+ ret = vty_mgmt_send_config_data(vty);
+ if (ret >= 0 && !implicit_commit)
+ vty->mgmt_num_pending_setcfg++;
+ return ret;
+ }
+
return nb_cli_apply_changes_internal(vty, xpath_base, false);
}
const char *xpath_base_fmt, ...)
{
char xpath_base[XPATH_MAXLEN] = {};
+ bool implicit_commit;
+ int ret;
/* Parse the base XPath format string. */
if (xpath_base_fmt) {
vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
va_end(ap);
}
+
+ if (vty_mgmt_fe_enabled()) {
+ VTY_CHECK_XPATH;
+
+ implicit_commit = vty_needs_implicit_commit(vty);
+ ret = vty_mgmt_send_config_data(vty);
+ if (ret >= 0 && !implicit_commit)
+ vty->mgmt_num_pending_setcfg++;
+ return ret;
+ }
+
return nb_cli_apply_changes_internal(vty, xpath_base, true);
}
void nb_cli_confirmed_commit_clean(struct vty *vty)
{
- thread_cancel(&vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
nb_config_free(vty->confirmed_commit_rollback);
vty->confirmed_commit_rollback = NULL;
}
return ret;
}
-static void nb_cli_confirmed_commit_timeout(struct thread *thread)
+static void nb_cli_confirmed_commit_timeout(struct event *thread)
{
- struct vty *vty = THREAD_ARG(thread);
+ struct vty *vty = EVENT_ARG(thread);
/* XXX: broadcast this message to all logged-in users? */
vty_out(vty,
"%% Resetting confirmed-commit timeout to %u minute(s)\n\n",
confirmed_timeout);
- thread_cancel(&vty->t_confirmed_commit_timeout);
- thread_add_timer(master,
- nb_cli_confirmed_commit_timeout, vty,
- confirmed_timeout * 60,
- &vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
+ event_add_timer(master, nb_cli_confirmed_commit_timeout,
+ vty, confirmed_timeout * 60,
+ &vty->t_confirmed_commit_timeout);
} else {
/* Accept commit confirmation. */
vty_out(vty, "%% Commit complete.\n\n");
vty->confirmed_commit_rollback = nb_config_dup(running_config);
vty->t_confirmed_commit_timeout = NULL;
- thread_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
- confirmed_timeout * 60,
- &vty->t_confirmed_commit_timeout);
+ event_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
+ confirmed_timeout * 60,
+ &vty->t_confirmed_commit_timeout);
}
context.client = NB_CLIENT_CLI;
.completions = yang_translator_autocomplete},
{.completions = NULL}};
-void nb_cli_init(struct thread_master *tm)
+void nb_cli_init(struct event_loop *tm)
{
master = tm;
extern void nb_cli_confirmed_commit_clean(struct vty *vty);
extern int nb_cli_confirmed_commit_rollback(struct vty *vty);
extern void nb_cli_install_default(int node);
-extern void nb_cli_init(struct thread_master *tm);
+extern void nb_cli_init(struct event_loop *tm);
extern void nb_cli_terminate(void);
#ifdef __cplusplus
static struct debug nb_dbg_client_confd = {0, "Northbound client: ConfD"};
-static struct thread_master *master;
+static struct event_loop *master;
static struct sockaddr confd_addr;
static int cdb_sub_sock, dp_ctl_sock, dp_worker_sock;
-static struct thread *t_cdb_sub, *t_dp_ctl, *t_dp_worker;
+static struct event *t_cdb_sub, *t_dp_ctl, *t_dp_worker;
static struct confd_daemon_ctx *dctx;
static struct confd_notification_ctx *live_ctx;
static bool confd_connected;
transaction = NULL;
context.client = NB_CLIENT_CONFD;
ret = nb_candidate_commit_prepare(context, candidate, NULL,
- &transaction, errmsg, sizeof(errmsg));
+ &transaction, false, false, errmsg,
+ sizeof(errmsg));
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES) {
enum confd_errcode errcode;
return 0;
}
-static void frr_confd_cdb_read_cb(struct thread *thread)
+static void frr_confd_cdb_read_cb(struct event *thread)
{
- int fd = THREAD_FD(thread);
+ int fd = EVENT_FD(thread);
enum cdb_sub_notification cdb_ev;
int flags;
int *subp = NULL;
int reslen = 0;
- thread_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);
+ event_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);
if (cdb_read_subscription_socket2(fd, &cdb_ev, &flags, &subp, &reslen)
!= CONFD_OK) {
}
pthread_detach(cdb_trigger_thread);
- thread_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
- &t_cdb_sub);
+ event_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
+ &t_cdb_sub);
return 0;
static void frr_confd_finish_cdb(void)
{
if (cdb_sub_sock > 0) {
- THREAD_OFF(t_cdb_sub);
+ EVENT_OFF(t_cdb_sub);
cdb_close(cdb_sub_sock);
}
}
return 0;
}
-static void frr_confd_dp_ctl_read(struct thread *thread)
+static void frr_confd_dp_ctl_read(struct event *thread)
{
- struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
- int fd = THREAD_FD(thread);
+ struct confd_daemon_ctx *dctx = EVENT_ARG(thread);
+ int fd = EVENT_FD(thread);
- thread_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);
+ event_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);
frr_confd_dp_read(dctx, fd);
}
-static void frr_confd_dp_worker_read(struct thread *thread)
+static void frr_confd_dp_worker_read(struct event *thread)
{
- struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
- int fd = THREAD_FD(thread);
+ struct confd_daemon_ctx *dctx = EVENT_ARG(thread);
+ int fd = EVENT_FD(thread);
- thread_add_read(master, frr_confd_dp_worker_read, dctx, fd, &t_dp_worker);
+ event_add_read(master, frr_confd_dp_worker_read, dctx, fd,
+ &t_dp_worker);
frr_confd_dp_read(dctx, fd);
}
goto error;
}
- thread_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
- &t_dp_ctl);
- thread_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
- &t_dp_worker);
+ event_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
+ &t_dp_ctl);
+ event_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
+ &t_dp_worker);
return 0;
static void frr_confd_finish_dp(void)
{
if (dp_worker_sock > 0) {
- THREAD_OFF(t_dp_worker);
+ EVENT_OFF(t_dp_worker);
close(dp_worker_sock);
}
if (dp_ctl_sock > 0) {
- THREAD_OFF(t_dp_ctl);
+ EVENT_OFF(t_dp_ctl);
close(dp_ctl_sock);
}
if (dctx != NULL)
return 0;
}
-static int frr_confd_module_late_init(struct thread_master *tm)
+static int frr_confd_module_late_init(struct event_loop *tm)
{
master = tm;
#include "log.h"
#include "libfrr.h"
#include "lib/version.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "command.h"
#include "lib_errors.h"
#include "northbound.h"
*/
static bool nb_dbg_client_grpc = 0;
-static struct thread_master *main_master;
+static struct event_loop *main_master;
static struct frr_pthread *fpt;
* state will either be MORE or FINISH. It will always be FINISH
* for Unary RPCs.
*/
- thread_add_event(main_master, c_callback, (void *)this, 0,
- NULL);
+ event_add_event(main_master, c_callback, (void *)this, 0, NULL);
pthread_mutex_lock(&this->cmux);
while (this->state == PROCESS)
}
protected:
- virtual CallState run_mainthread(struct thread *thread) = 0;
+ virtual CallState run_mainthread(struct event *thread) = 0;
- static void c_callback(struct thread *thread)
+ static void c_callback(struct event *thread)
{
- auto _tag = static_cast<RpcStateBase *>(THREAD_ARG(thread));
+ auto _tag = static_cast<RpcStateBase *>(EVENT_ARG(thread));
/*
* We hold the lock until the callback finishes and has updated
* _tag->state, then we signal done and release.
©->responder, cq, cq, copy);
}
- CallState run_mainthread(struct thread *thread) override
+ CallState run_mainthread(struct event *thread) override
{
// Unary RPC are always finished, see "Unary" :)
grpc::Status status = this->callback(this);
©->async_responder, cq, cq, copy);
}
- CallState run_mainthread(struct thread *thread) override
+ CallState run_mainthread(struct event *thread) override
{
if (this->callback(this))
return MORE;
grpc_debug("`-> Performing PREPARE");
ret = nb_candidate_commit_prepare(
context, candidate->config, comment.c_str(),
- &candidate->transaction, errmsg, sizeof(errmsg));
+ &candidate->transaction, false, false, errmsg,
+ sizeof(errmsg));
break;
case frr::CommitRequest::ABORT:
grpc_debug("`-> Performing ABORT");
* fork. This is done by scheduling this init function as an event task, since
* the event loop doesn't run until after fork.
*/
-static void frr_grpc_module_very_late_init(struct thread *thread)
+static void frr_grpc_module_very_late_init(struct event *thread)
{
const char *args = THIS_MODULE->load_args;
uint port = GRPC_DEFAULT_PORT;
flog_err(EC_LIB_GRPC_INIT, "failed to initialize the gRPC module");
}
-static int frr_grpc_module_late_init(struct thread_master *tm)
+static int frr_grpc_module_late_init(struct event_loop *tm)
{
main_master = tm;
hook_register(frr_fini, frr_grpc_finish);
- thread_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
+ event_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
return 0;
}
static struct debug nb_dbg_client_sysrepo = {0, "Northbound client: Sysrepo"};
-static struct thread_master *master;
+static struct event_loop *master;
static sr_session_ctx_t *session;
static sr_conn_ctx_t *connection;
static struct nb_transaction *transaction;
-static void frr_sr_read_cb(struct thread *thread);
+static void frr_sr_read_cb(struct event *thread);
static int frr_sr_finish(void);
/* Convert FRR YANG data value to sysrepo YANG data value. */
* required to apply them.
*/
ret = nb_candidate_commit_prepare(context, candidate, NULL,
- &transaction, errmsg, sizeof(errmsg));
+ &transaction, false, false, errmsg,
+ sizeof(errmsg));
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES)
flog_warn(
EC_LIB_LIBSYSREPO,
return NB_OK;
}
-static void frr_sr_read_cb(struct thread *thread)
+static void frr_sr_read_cb(struct event *thread)
{
- struct yang_module *module = THREAD_ARG(thread);
- int fd = THREAD_FD(thread);
+ struct yang_module *module = EVENT_ARG(thread);
+ int fd = EVENT_FD(thread);
int ret;
ret = sr_subscription_process_events(module->sr_subscription, session,
return;
}
- thread_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
+ event_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
}
static void frr_sr_subscribe_config(struct yang_module *module)
sr_strerror(ret));
goto cleanup;
}
- thread_add_read(master, frr_sr_read_cb, module,
- event_pipe, &module->sr_thread);
+ event_add_read(master, frr_sr_read_cb, module, event_pipe,
+ &module->sr_thread);
}
hook_register(nb_notification_send, frr_sr_notification_send);
if (!module->sr_subscription)
continue;
sr_unsubscribe(module->sr_subscription);
- THREAD_OFF(module->sr_thread);
+ EVENT_OFF(module->sr_thread);
}
if (session)
return 0;
}
-static int frr_sr_module_config_loaded(struct thread_master *tm)
+static int frr_sr_module_config_loaded(struct event_loop *tm)
{
master = tm;
return 0;
}
-static int frr_sr_module_late_init(struct thread_master *tm)
+static int frr_sr_module_late_init(struct event_loop *tm)
{
frr_sr_cli_init();
if (IPV4_CLASS_D(ip))
return false;
- if (IPV4_CLASS_E(ip)) {
+ if (IPV4_NET0(ip) || IPV4_NET127(ip) || IPV4_CLASS_E(ip)) {
if (cmd_allow_reserved_ranges_get())
return true;
else
/* NOTE: This routine expects the address argument in network byte order. */
static inline bool ipv4_martian(const struct in_addr *addr)
{
- in_addr_t ip = ntohl(addr->s_addr);
-
- if (IPV4_NET0(ip) || IPV4_NET127(ip) || !ipv4_unicast_valid(addr)) {
+ if (!ipv4_unicast_valid(addr))
return true;
- }
return false;
}
struct pullwr {
int fd;
- struct thread_master *tm;
+ struct event_loop *tm;
/* writer == NULL <=> we're idle */
- struct thread *writer;
+ struct event *writer;
void *arg;
void (*fill)(void *, struct pullwr *);
DEFINE_MTYPE_STATIC(LIB, PULLWR_HEAD, "pull-driven write controller");
DEFINE_MTYPE_STATIC(LIB, PULLWR_BUF, "pull-driven write buffer");
-static void pullwr_run(struct thread *t);
+static void pullwr_run(struct event *t);
-struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
- void *arg,
- void (*fill)(void *, struct pullwr *),
- void (*err)(void *, struct pullwr *, bool))
+struct pullwr *_pullwr_new(struct event_loop *tm, int fd, void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool))
{
struct pullwr *pullwr;
void pullwr_del(struct pullwr *pullwr)
{
- THREAD_OFF(pullwr->writer);
+ EVENT_OFF(pullwr->writer);
XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
XFREE(MTYPE_PULLWR_HEAD, pullwr);
if (pullwr->writer)
return;
- thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
+ event_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
}
static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
pullwr_bump(pullwr);
}
-static void pullwr_run(struct thread *t)
+static void pullwr_run(struct event *t)
{
- struct pullwr *pullwr = THREAD_ARG(t);
+ struct pullwr *pullwr = EVENT_ARG(t);
struct iovec iov[2];
size_t niov, lastvalid;
ssize_t nwr;
if (pullwr->valid == 0) {
/* we made a fill() call above that didn't feed any
* data in, and we have nothing more queued, so we go
- * into idle, i.e. no calling thread_add_write()
+ * into idle, i.e. no calling event_add_write()
*/
pullwr_resize(pullwr, 0);
return;
* is full and we go wait until it's available for writing again.
*/
- thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
+ event_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
&pullwr->writer);
/* if we hit the time limit, just keep the buffer, we'll probably need
#include <stdbool.h>
#include <stdint.h>
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#ifdef __cplusplus
* and released with pullwr_del(). This can be done from inside the callback,
* the pullwr code holds no more references on it when calling err().
*/
-extern struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
- void *arg,
- void (*fill)(void *, struct pullwr *),
- void (*err)(void *, struct pullwr *, bool eof));
+extern struct pullwr *_pullwr_new(struct event_loop *tm, int fd, void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *,
+ bool eof));
extern void pullwr_del(struct pullwr *pullwr);
/* type-checking wrapper. makes sure fill() and err() take a first argument
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "hash.h"
#include "log.h"
#include "typesafe.h"
#include "jhash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "lib_errors.h"
#include "resolver.h"
#include "command.h"
struct resolver_state {
ares_channel channel;
- struct thread_master *master;
- struct thread *timeout;
+ struct event_loop *master;
+ struct event *timeout;
};
static struct resolver_state state;
int fd;
struct resolver_state *state;
- struct thread *t_read, *t_write;
+ struct event *t_read, *t_write;
};
static int resolver_fd_cmp(const struct resolver_fd *a,
static void resolver_update_timeouts(struct resolver_state *r);
-static void resolver_cb_timeout(struct thread *t)
+static void resolver_cb_timeout(struct event *t)
{
- struct resolver_state *r = THREAD_ARG(t);
+ struct resolver_state *r = EVENT_ARG(t);
ares_process(r->channel, NULL, NULL);
resolver_update_timeouts(r);
}
-static void resolver_cb_socket_readable(struct thread *t)
+static void resolver_cb_socket_readable(struct event *t)
{
- struct resolver_fd *resfd = THREAD_ARG(t);
+ struct resolver_fd *resfd = EVENT_ARG(t);
struct resolver_state *r = resfd->state;
- thread_add_read(r->master, resolver_cb_socket_readable, resfd,
- resfd->fd, &resfd->t_read);
+ event_add_read(r->master, resolver_cb_socket_readable, resfd, resfd->fd,
+ &resfd->t_read);
/* ^ ordering important:
- * ares_process_fd may transitively call THREAD_OFF(resfd->t_read)
+ * ares_process_fd may transitively call EVENT_OFF(resfd->t_read)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
*/
ares_process_fd(r->channel, resfd->fd, ARES_SOCKET_BAD);
resolver_update_timeouts(r);
}
-static void resolver_cb_socket_writable(struct thread *t)
+static void resolver_cb_socket_writable(struct event *t)
{
- struct resolver_fd *resfd = THREAD_ARG(t);
+ struct resolver_fd *resfd = EVENT_ARG(t);
struct resolver_state *r = resfd->state;
- thread_add_write(r->master, resolver_cb_socket_writable, resfd,
- resfd->fd, &resfd->t_write);
+ event_add_write(r->master, resolver_cb_socket_writable, resfd,
+ resfd->fd, &resfd->t_write);
/* ^ ordering important:
- * ares_process_fd may transitively call THREAD_OFF(resfd->t_write)
+ * ares_process_fd may transitively call EVENT_OFF(resfd->t_write)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
*/
ares_process_fd(r->channel, ARES_SOCKET_BAD, resfd->fd);
{
struct timeval *tv, tvbuf;
- THREAD_OFF(r->timeout);
+ EVENT_OFF(r->timeout);
tv = ares_timeout(r->channel, NULL, &tvbuf);
if (tv) {
unsigned int timeoutms = tv->tv_sec * 1000 + tv->tv_usec / 1000;
- thread_add_timer_msec(r->master, resolver_cb_timeout, r,
- timeoutms, &r->timeout);
+ event_add_timer_msec(r->master, resolver_cb_timeout, r,
+ timeoutms, &r->timeout);
}
}
assert(resfd->state == r);
if (!readable)
- THREAD_OFF(resfd->t_read);
+ EVENT_OFF(resfd->t_read);
else if (!resfd->t_read)
- thread_add_read(r->master, resolver_cb_socket_readable, resfd,
- fd, &resfd->t_read);
+ event_add_read(r->master, resolver_cb_socket_readable, resfd,
+ fd, &resfd->t_read);
if (!writable)
- THREAD_OFF(resfd->t_write);
+ EVENT_OFF(resfd->t_write);
else if (!resfd->t_write)
- thread_add_write(r->master, resolver_cb_socket_writable, resfd,
- fd, &resfd->t_write);
+ event_add_write(r->master, resolver_cb_socket_writable, resfd,
+ fd, &resfd->t_write);
resolver_fd_drop_maybe(resfd);
}
callback(query, NULL, i, &addr[0]);
}
-static void resolver_cb_literal(struct thread *t)
+static void resolver_cb_literal(struct event *t)
{
- struct resolver_query *query = THREAD_ARG(t);
+ struct resolver_query *query = EVENT_ARG(t);
void (*callback)(struct resolver_query *, const char *, int,
union sockunion *);
/* for consistency with proper name lookup, don't call the
* callback immediately; defer to thread loop
*/
- thread_add_timer_msec(state.master, resolver_cb_literal,
- query, 0, &query->literal_cb);
+ event_add_timer_msec(state.master, resolver_cb_literal, query,
+ 0, &query->literal_cb);
return;
}
}
-void resolver_init(struct thread_master *tm)
+void resolver_init(struct event_loop *tm)
{
struct ares_options ares_opts;
#ifndef _FRR_RESOLVER_H
#define _FRR_RESOLVER_H
-#include "thread.h"
+#include "frrevent.h"
#include "sockunion.h"
#ifdef __cplusplus
/* used to immediate provide the result if IP literal is passed in */
union sockunion literal_addr;
- struct thread *literal_cb;
+ struct event *literal_cb;
};
-void resolver_init(struct thread_master *tm);
+void resolver_init(struct event_loop *tm);
void resolver_resolve(struct resolver_query *query, int af, vrf_id_t vrf_id,
const char *hostname,
void (*cb)(struct resolver_query *, const char *, int,
/* master signals descriptor struct */
static struct frr_sigevent_master_t {
- struct thread *t;
+ struct event *t;
struct frr_signal_t *signals;
int sigc;
#ifdef SIGEVENT_SCHEDULE_THREAD
/* timer thread to check signals. shouldn't be needed */
-void frr_signal_timer(struct thread *t)
+void frr_signal_timer(struct event *t)
{
struct frr_sigevent_master_t *sigm;
- sigm = THREAD_ARG(t);
+ sigm = EVENT_ARG(t);
sigm->t = NULL;
- thread_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
- FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
+ event_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
+ FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
frr_sigevent_process();
}
#endif /* SIGEVENT_SCHEDULE_THREAD */
}
}
-void signal_init(struct thread_master *m, int sigc,
- struct frr_signal_t signals[])
+void signal_init(struct event_loop *m, int sigc, struct frr_signal_t signals[])
{
int i = 0;
#ifdef SIGEVENT_SCHEDULE_THREAD
sigmaster.t = NULL;
- thread_add_timer(m, frr_signal_timer, &sigmaster,
- FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
+ event_add_timer(m, frr_signal_timer, &sigmaster,
+ FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
#endif /* SIGEVENT_SCHEDULE_THREAD */
}
#ifndef _FRR_SIGNAL_H
#define _FRR_SIGNAL_H
-#include <thread.h>
+#include <frrevent.h>
#ifdef __cplusplus
extern "C" {
/* initialise sigevent system
* takes:
- * - pointer to valid struct thread_master
+ * - pointer to valid struct event_loop
* - number of elements in passed in signals array
* - array of frr_signal_t's describing signals to handle
* and handlers to use for each signal
*/
-extern void signal_init(struct thread_master *m, int sigc,
+extern void signal_init(struct event_loop *m, int sigc,
struct frr_signal_t *signals);
#include <net-snmp/agent/net-snmp-agent-includes.h>
#include <net-snmp/agent/snmp_vars.h>
-#include "thread.h"
+#include "frrevent.h"
#include "hook.h"
#ifdef __cplusplus
*/
extern bool smux_enabled(void);
-extern void smux_init(struct thread_master *tm);
+extern void smux_init(struct event_loop *tm);
extern void smux_agentx_enable(void);
extern void smux_register_mib(const char *, struct variable *, size_t, int,
oid[], size_t);
#include "command.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
DEFINE_MTYPE_STATIC(LIB, SPF_BACKOFF, "SPF backoff");
};
struct spf_backoff {
- struct thread_master *m;
+ struct event_loop *m;
/* Timers as per draft */
long init_delay;
/* State machine */
enum spf_backoff_state state;
- struct thread *t_holddown;
- struct thread *t_timetolearn;
+ struct event *t_holddown;
+ struct event *t_timetolearn;
/* For debugging */
char *name;
return "???";
}
-struct spf_backoff *spf_backoff_new(struct thread_master *m, const char *name,
+struct spf_backoff *spf_backoff_new(struct event_loop *m, const char *name,
long init_delay, long short_delay,
long long_delay, long holddown,
long timetolearn)
if (!backoff)
return;
- thread_cancel(&backoff->t_holddown);
- thread_cancel(&backoff->t_timetolearn);
+ event_cancel(&backoff->t_holddown);
+ event_cancel(&backoff->t_timetolearn);
XFREE(MTYPE_SPF_BACKOFF_NAME, backoff->name);
XFREE(MTYPE_SPF_BACKOFF, backoff);
}
-static void spf_backoff_timetolearn_elapsed(struct thread *thread)
+static void spf_backoff_timetolearn_elapsed(struct event *thread)
{
- struct spf_backoff *backoff = THREAD_ARG(thread);
+ struct spf_backoff *backoff = EVENT_ARG(thread);
backoff->state = SPF_BACKOFF_LONG_WAIT;
backoff_debug("SPF Back-off(%s) TIMETOLEARN elapsed, move to state %s",
backoff->name, spf_backoff_state2str(backoff->state));
}
-static void spf_backoff_holddown_elapsed(struct thread *thread)
+static void spf_backoff_holddown_elapsed(struct event *thread)
{
- struct spf_backoff *backoff = THREAD_ARG(thread);
+ struct spf_backoff *backoff = EVENT_ARG(thread);
- THREAD_OFF(backoff->t_timetolearn);
+ EVENT_OFF(backoff->t_timetolearn);
timerclear(&backoff->first_event_time);
backoff->state = SPF_BACKOFF_QUIET;
backoff_debug("SPF Back-off(%s) HOLDDOWN elapsed, move to state %s",
switch (backoff->state) {
case SPF_BACKOFF_QUIET:
backoff->state = SPF_BACKOFF_SHORT_WAIT;
- thread_add_timer_msec(
+ event_add_timer_msec(
backoff->m, spf_backoff_timetolearn_elapsed, backoff,
backoff->timetolearn, &backoff->t_timetolearn);
- thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
- backoff, backoff->holddown,
- &backoff->t_holddown);
+ event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
+ backoff, backoff->holddown,
+ &backoff->t_holddown);
backoff->first_event_time = now;
rv = backoff->init_delay;
break;
case SPF_BACKOFF_SHORT_WAIT:
case SPF_BACKOFF_LONG_WAIT:
- thread_cancel(&backoff->t_holddown);
- thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
- backoff, backoff->holddown,
- &backoff->t_holddown);
+ event_cancel(&backoff->t_holddown);
+ event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
+ backoff, backoff->holddown,
+ &backoff->t_holddown);
if (backoff->state == SPF_BACKOFF_SHORT_WAIT)
rv = backoff->short_delay;
else
vty_out(vty, "%sHolddown timer: %ld msec\n", prefix,
backoff->holddown);
if (backoff->t_holddown) {
- struct timeval remain =
- thread_timer_remain(backoff->t_holddown);
+ struct timeval remain = event_timer_remain(backoff->t_holddown);
+
vty_out(vty, "%s Still runs for %lld msec\n",
prefix,
(long long)remain.tv_sec * 1000
backoff->timetolearn);
if (backoff->t_timetolearn) {
struct timeval remain =
- thread_timer_remain(backoff->t_timetolearn);
+ event_timer_remain(backoff->t_timetolearn);
vty_out(vty, "%s Still runs for %lld msec\n",
prefix,
(long long)remain.tv_sec * 1000
#endif
struct spf_backoff;
-struct thread_master;
+struct event_loop;
struct vty;
-struct spf_backoff *spf_backoff_new(struct thread_master *m, const char *name,
+struct spf_backoff *spf_backoff_new(struct event_loop *m, const char *name,
long init_delay, long short_delay,
long long_delay, long holddown,
long timetolearn);
lib/frrscript.c \
lib/frr_pthread.c \
lib/frrstr.c \
- lib/getopt.c \
- lib/getopt1.c \
lib/grammar_sandbox.c \
lib/graph.c \
lib/hash.c \
lib/log_vty.c \
lib/md5.c \
lib/memory.c \
+ lib/mgmt_be_client.c \
+ lib/mgmt_fe_client.c \
+ lib/mgmt_msg.c \
lib/mlag.c \
lib/module.c \
lib/mpls.c \
lib/systemd.c \
lib/table.c \
lib/termtable.c \
- lib/thread.c \
+ lib/event.c \
lib/typerb.c \
lib/typesafe.c \
lib/vector.c \
yang/frr-module-translator.yang.c \
# end
+# Add logic to build mgmt.proto
+lib_libfrr_la_LIBADD += $(PROTOBUF_C_LIBS)
+
+BUILT_SOURCES += \
+ lib/mgmt.pb-c.c \
+ lib/mgmt.pb-c.h \
+ # end
+
+CLEANFILES += \
+ lib/mgmt.pb-c.h \
+ lib/mgmt.pb-c.c \
+ # end
+
+lib_libfrr_la_SOURCES += \
+ lib/mgmt.pb-c.c \
+ #end
+
if SQLITE3
lib_libfrr_la_LIBADD += $(SQLITE3_LIBS)
lib_libfrr_la_SOURCES += lib/db.c
lib/plist.c \
lib/routemap.c \
lib/routemap_cli.c \
- lib/thread.c \
+ lib/event.c \
lib/vty.c \
lib/zlog_5424_cli.c \
# end
lib/frratomic.h \
lib/frrcu.h \
lib/frrstr.h \
- lib/getopt.h \
lib/graph.h \
lib/hash.h \
lib/hook.h \
lib/log_vty.h \
lib/md5.h \
lib/memory.h \
+ lib/mgmt.pb-c.h \
+ lib/mgmt_be_client.h \
+ lib/mgmt_fe_client.h \
+ lib/mgmt_msg.h \
+ lib/mgmt_pb.h \
lib/module.h \
lib/monotime.h \
lib/mpls.h \
lib/systemd.h \
lib/table.h \
lib/termtable.h \
- lib/thread.h \
+ lib/frrevent.h \
lib/trace.h \
lib/typerb.h \
lib/typesafe.h \
#include <zebra.h>
#include <sys/un.h>
-#include "thread.h"
+#include "frrevent.h"
#include "systemd.h"
#include "lib_errors.h"
systemd_send_information("STOPPING=1");
}
-static struct thread_master *systemd_master = NULL;
+static struct event_loop *systemd_master = NULL;
-static void systemd_send_watchdog(struct thread *t)
+static void systemd_send_watchdog(struct event *t)
{
systemd_send_information("WATCHDOG=1");
assert(watchdog_msec > 0);
- thread_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
- watchdog_msec, NULL);
+ event_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
+ watchdog_msec, NULL);
}
-void systemd_send_started(struct thread_master *m)
+void systemd_send_started(struct event_loop *m)
{
assert(m != NULL);
void systemd_send_stopping(void);
/*
- * master - The struct thread_master * to use to schedule ourself
+ * master - The struct event_loop * to use to schedule ourself
* the_process - Should we send watchdog if we are not the requested
* process?
*/
-void systemd_send_started(struct thread_master *master);
+void systemd_send_started(struct event_loop *master);
/*
* status - A status string to send to systemd
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Thread management routine
- * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
- */
-
-/* #define DEBUG */
-
-#include <zebra.h>
-#include <sys/resource.h>
-
-#include "thread.h"
-#include "memory.h"
-#include "frrcu.h"
-#include "log.h"
-#include "hash.h"
-#include "command.h"
-#include "sigevent.h"
-#include "network.h"
-#include "jhash.h"
-#include "frratomic.h"
-#include "frr_pthread.h"
-#include "lib_errors.h"
-#include "libfrr_trace.h"
-#include "libfrr.h"
-
-DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
-DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master");
-DEFINE_MTYPE_STATIC(LIB, THREAD_POLL, "Thread Poll Info");
-DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats");
-
-DECLARE_LIST(thread_list, struct thread, threaditem);
-
-struct cancel_req {
- int flags;
- struct thread *thread;
- void *eventobj;
- struct thread **threadref;
-};
-
-/* Flags for task cancellation */
-#define THREAD_CANCEL_FLAG_READY 0x01
-
-static int thread_timer_cmp(const struct thread *a, const struct thread *b)
-{
- if (a->u.sands.tv_sec < b->u.sands.tv_sec)
- return -1;
- if (a->u.sands.tv_sec > b->u.sands.tv_sec)
- return 1;
- if (a->u.sands.tv_usec < b->u.sands.tv_usec)
- return -1;
- if (a->u.sands.tv_usec > b->u.sands.tv_usec)
- return 1;
- return 0;
-}
-
-DECLARE_HEAP(thread_timer_list, struct thread, timeritem, thread_timer_cmp);
-
-#if defined(__APPLE__)
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#endif
-
-#define AWAKEN(m) \
- do { \
- const unsigned char wakebyte = 0x01; \
- write(m->io_pipe[1], &wakebyte, 1); \
- } while (0);
-
-/* control variable for initializer */
-static pthread_once_t init_once = PTHREAD_ONCE_INIT;
-pthread_key_t thread_current;
-
-static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
-static struct list *masters;
-
-static void thread_free(struct thread_master *master, struct thread *thread);
-
-#ifndef EXCLUDE_CPU_TIME
-#define EXCLUDE_CPU_TIME 0
-#endif
-#ifndef CONSUMED_TIME_CHECK
-#define CONSUMED_TIME_CHECK 0
-#endif
-
-bool cputime_enabled = !EXCLUDE_CPU_TIME;
-unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
-unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
-
-/* CLI start ---------------------------------------------------------------- */
-#include "lib/thread_clippy.c"
-
-static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
-{
- int size = sizeof(a->func);
-
- return jhash(&a->func, size, 0);
-}
-
-static bool cpu_record_hash_cmp(const struct cpu_thread_history *a,
- const struct cpu_thread_history *b)
-{
- return a->func == b->func;
-}
-
-static void *cpu_record_hash_alloc(struct cpu_thread_history *a)
-{
- struct cpu_thread_history *new;
- new = XCALLOC(MTYPE_THREAD_STATS, sizeof(struct cpu_thread_history));
- new->func = a->func;
- new->funcname = a->funcname;
- return new;
-}
-
-static void cpu_record_hash_free(void *a)
-{
- struct cpu_thread_history *hist = a;
-
- XFREE(MTYPE_THREAD_STATS, hist);
-}
-
-static void vty_out_cpu_thread_history(struct vty *vty,
- struct cpu_thread_history *a)
-{
- vty_out(vty,
- "%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
- a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
- a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
- (a->real.total / a->total_calls), a->real.max,
- a->total_cpu_warn, a->total_wall_warn, a->total_starv_warn);
- vty_out(vty, " %c%c%c%c%c %s\n",
- a->types & (1 << THREAD_READ) ? 'R' : ' ',
- a->types & (1 << THREAD_WRITE) ? 'W' : ' ',
- a->types & (1 << THREAD_TIMER) ? 'T' : ' ',
- a->types & (1 << THREAD_EVENT) ? 'E' : ' ',
- a->types & (1 << THREAD_EXECUTE) ? 'X' : ' ', a->funcname);
-}
-
-static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
-{
- struct cpu_thread_history *totals = args[0];
- struct cpu_thread_history copy;
- struct vty *vty = args[1];
- uint8_t *filter = args[2];
-
- struct cpu_thread_history *a = bucket->data;
-
- copy.total_active =
- atomic_load_explicit(&a->total_active, memory_order_seq_cst);
- copy.total_calls =
- atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
- copy.total_cpu_warn =
- atomic_load_explicit(&a->total_cpu_warn, memory_order_seq_cst);
- copy.total_wall_warn =
- atomic_load_explicit(&a->total_wall_warn, memory_order_seq_cst);
- copy.total_starv_warn = atomic_load_explicit(&a->total_starv_warn,
- memory_order_seq_cst);
- copy.cpu.total =
- atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
- copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
- copy.real.total =
- atomic_load_explicit(&a->real.total, memory_order_seq_cst);
- copy.real.max =
- atomic_load_explicit(&a->real.max, memory_order_seq_cst);
- copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
- copy.funcname = a->funcname;
-
- if (!(copy.types & *filter))
- return;
-
- vty_out_cpu_thread_history(vty, ©);
- totals->total_active += copy.total_active;
- totals->total_calls += copy.total_calls;
- totals->total_cpu_warn += copy.total_cpu_warn;
- totals->total_wall_warn += copy.total_wall_warn;
- totals->total_starv_warn += copy.total_starv_warn;
- totals->real.total += copy.real.total;
- if (totals->real.max < copy.real.max)
- totals->real.max = copy.real.max;
- totals->cpu.total += copy.cpu.total;
- if (totals->cpu.max < copy.cpu.max)
- totals->cpu.max = copy.cpu.max;
-}
-
-static void cpu_record_print(struct vty *vty, uint8_t filter)
-{
- struct cpu_thread_history tmp;
- void *args[3] = {&tmp, vty, &filter};
- struct thread_master *m;
- struct listnode *ln;
-
- if (!cputime_enabled)
- vty_out(vty,
- "\n"
- "Collecting CPU time statistics is currently disabled. Following statistics\n"
- "will be zero or may display data from when collection was enabled. Use the\n"
- " \"service cputime-stats\" command to start collecting data.\n"
- "\nCounters and wallclock times are always maintained and should be accurate.\n");
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.funcname = "TOTAL";
- tmp.types = filter;
-
- frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- const char *name = m->name ? m->name : "main";
-
- char underline[strlen(name) + 1];
- memset(underline, '-', sizeof(underline));
- underline[sizeof(underline) - 1] = '\0';
-
- vty_out(vty, "\n");
- vty_out(vty, "Showing statistics for pthread %s\n",
- name);
- vty_out(vty, "-------------------------------%s\n",
- underline);
- vty_out(vty, "%30s %18s %18s\n", "",
- "CPU (user+system):", "Real (wall-clock):");
- vty_out(vty,
- "Active Runtime(ms) Invoked Avg uSec Max uSecs");
- vty_out(vty, " Avg uSec Max uSecs");
- vty_out(vty,
- " CPU_Warn Wall_Warn Starv_Warn Type Thread\n");
-
- if (m->cpu_record->count)
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_print,
- args);
- else
- vty_out(vty, "No data to display yet.\n");
-
- vty_out(vty, "\n");
- }
- }
-
- vty_out(vty, "\n");
- vty_out(vty, "Total thread statistics\n");
- vty_out(vty, "-------------------------\n");
- vty_out(vty, "%30s %18s %18s\n", "",
- "CPU (user+system):", "Real (wall-clock):");
- vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
- vty_out(vty, " Avg uSec Max uSecs CPU_Warn Wall_Warn");
- vty_out(vty, " Type Thread\n");
-
- if (tmp.total_calls > 0)
- vty_out_cpu_thread_history(vty, &tmp);
-}
-
-static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
-{
- uint8_t *filter = args[0];
- struct hash *cpu_record = args[1];
-
- struct cpu_thread_history *a = bucket->data;
-
- if (!(a->types & *filter))
- return;
-
- hash_release(cpu_record, bucket->data);
-}
-
-static void cpu_record_clear(uint8_t filter)
-{
- uint8_t *tmp = &filter;
- struct thread_master *m;
- struct listnode *ln;
-
- frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- frr_with_mutex (&m->mtx) {
- void *args[2] = {tmp, m->cpu_record};
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_clear,
- args);
- }
- }
- }
-}
-
-static uint8_t parse_filter(const char *filterstr)
-{
- int i = 0;
- int filter = 0;
-
- while (filterstr[i] != '\0') {
- switch (filterstr[i]) {
- case 'r':
- case 'R':
- filter |= (1 << THREAD_READ);
- break;
- case 'w':
- case 'W':
- filter |= (1 << THREAD_WRITE);
- break;
- case 't':
- case 'T':
- filter |= (1 << THREAD_TIMER);
- break;
- case 'e':
- case 'E':
- filter |= (1 << THREAD_EVENT);
- break;
- case 'x':
- case 'X':
- filter |= (1 << THREAD_EXECUTE);
- break;
- default:
- break;
- }
- ++i;
- }
- return filter;
-}
-
-DEFUN_NOSH (show_thread_cpu,
- show_thread_cpu_cmd,
- "show thread cpu [FILTER]",
- SHOW_STR
- "Thread information\n"
- "Thread CPU usage\n"
- "Display filter (rwtex)\n")
-{
- uint8_t filter = (uint8_t)-1U;
- int idx = 0;
-
- if (argv_find(argv, argc, "FILTER", &idx)) {
- filter = parse_filter(argv[idx]->arg);
- if (!filter) {
- vty_out(vty,
- "Invalid filter \"%s\" specified; must contain at leastone of 'RWTEXB'\n",
- argv[idx]->arg);
- return CMD_WARNING;
- }
- }
-
- cpu_record_print(vty, filter);
- return CMD_SUCCESS;
-}
-
-DEFPY (service_cputime_stats,
- service_cputime_stats_cmd,
- "[no] service cputime-stats",
- NO_STR
- "Set up miscellaneous service\n"
- "Collect CPU usage statistics\n")
-{
- cputime_enabled = !no;
- return CMD_SUCCESS;
-}
-
-DEFPY (service_cputime_warning,
- service_cputime_warning_cmd,
- "[no] service cputime-warning (1-4294967295)",
- NO_STR
- "Set up miscellaneous service\n"
- "Warn for tasks exceeding CPU usage threshold\n"
- "Warning threshold in milliseconds\n")
-{
- if (no)
- cputime_threshold = 0;
- else
- cputime_threshold = cputime_warning * 1000;
- return CMD_SUCCESS;
-}
-
-ALIAS (service_cputime_warning,
- no_service_cputime_warning_cmd,
- "no service cputime-warning",
- NO_STR
- "Set up miscellaneous service\n"
- "Warn for tasks exceeding CPU usage threshold\n")
-
-DEFPY (service_walltime_warning,
- service_walltime_warning_cmd,
- "[no] service walltime-warning (1-4294967295)",
- NO_STR
- "Set up miscellaneous service\n"
- "Warn for tasks exceeding total wallclock threshold\n"
- "Warning threshold in milliseconds\n")
-{
- if (no)
- walltime_threshold = 0;
- else
- walltime_threshold = walltime_warning * 1000;
- return CMD_SUCCESS;
-}
-
-ALIAS (service_walltime_warning,
- no_service_walltime_warning_cmd,
- "no service walltime-warning",
- NO_STR
- "Set up miscellaneous service\n"
- "Warn for tasks exceeding total wallclock threshold\n")
-
-static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
-{
- const char *name = m->name ? m->name : "main";
- char underline[strlen(name) + 1];
- struct thread *thread;
- uint32_t i;
-
- memset(underline, '-', sizeof(underline));
- underline[sizeof(underline) - 1] = '\0';
-
- vty_out(vty, "\nShowing poll FD's for %s\n", name);
- vty_out(vty, "----------------------%s\n", underline);
- vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
- m->fd_limit);
- for (i = 0; i < m->handler.pfdcount; i++) {
- vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
- m->handler.pfds[i].fd, m->handler.pfds[i].events,
- m->handler.pfds[i].revents);
-
- if (m->handler.pfds[i].events & POLLIN) {
- thread = m->read[m->handler.pfds[i].fd];
-
- if (!thread)
- vty_out(vty, "ERROR ");
- else
- vty_out(vty, "%s ", thread->xref->funcname);
- } else
- vty_out(vty, " ");
-
- if (m->handler.pfds[i].events & POLLOUT) {
- thread = m->write[m->handler.pfds[i].fd];
-
- if (!thread)
- vty_out(vty, "ERROR\n");
- else
- vty_out(vty, "%s\n", thread->xref->funcname);
- } else
- vty_out(vty, "\n");
- }
-}
-
-DEFUN_NOSH (show_thread_poll,
- show_thread_poll_cmd,
- "show thread poll",
- SHOW_STR
- "Thread information\n"
- "Show poll FD's and information\n")
-{
- struct listnode *node;
- struct thread_master *m;
-
- frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
- show_thread_poll_helper(vty, m);
- }
- }
-
- return CMD_SUCCESS;
-}
-
-
-DEFUN (clear_thread_cpu,
- clear_thread_cpu_cmd,
- "clear thread cpu [FILTER]",
- "Clear stored data in all pthreads\n"
- "Thread information\n"
- "Thread CPU usage\n"
- "Display filter (rwtexb)\n")
-{
- uint8_t filter = (uint8_t)-1U;
- int idx = 0;
-
- if (argv_find(argv, argc, "FILTER", &idx)) {
- filter = parse_filter(argv[idx]->arg);
- if (!filter) {
- vty_out(vty,
- "Invalid filter \"%s\" specified; must contain at leastone of 'RWTEXB'\n",
- argv[idx]->arg);
- return CMD_WARNING;
- }
- }
-
- cpu_record_clear(filter);
- return CMD_SUCCESS;
-}
-
-static void show_thread_timers_helper(struct vty *vty, struct thread_master *m)
-{
- const char *name = m->name ? m->name : "main";
- char underline[strlen(name) + 1];
- struct thread *thread;
-
- memset(underline, '-', sizeof(underline));
- underline[sizeof(underline) - 1] = '\0';
-
- vty_out(vty, "\nShowing timers for %s\n", name);
- vty_out(vty, "-------------------%s\n", underline);
-
- frr_each (thread_timer_list, &m->timer, thread) {
- vty_out(vty, " %-50s%pTH\n", thread->hist->funcname, thread);
- }
-}
-
-DEFPY_NOSH (show_thread_timers,
- show_thread_timers_cmd,
- "show thread timers",
- SHOW_STR
- "Thread information\n"
- "Show all timers and how long they have in the system\n")
-{
- struct listnode *node;
- struct thread_master *m;
-
- frr_with_mutex (&masters_mtx) {
- for (ALL_LIST_ELEMENTS_RO(masters, node, m))
- show_thread_timers_helper(vty, m);
- }
-
- return CMD_SUCCESS;
-}
-
-void thread_cmd_init(void)
-{
- install_element(VIEW_NODE, &show_thread_cpu_cmd);
- install_element(VIEW_NODE, &show_thread_poll_cmd);
- install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
-
- install_element(CONFIG_NODE, &service_cputime_stats_cmd);
- install_element(CONFIG_NODE, &service_cputime_warning_cmd);
- install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
- install_element(CONFIG_NODE, &service_walltime_warning_cmd);
- install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);
-
- install_element(VIEW_NODE, &show_thread_timers_cmd);
-}
-/* CLI end ------------------------------------------------------------------ */
-
-
-static void cancelreq_del(void *cr)
-{
- XFREE(MTYPE_TMP, cr);
-}
-
-/* initializer, only ever called once */
-static void initializer(void)
-{
- pthread_key_create(&thread_current, NULL);
-}
-
-struct thread_master *thread_master_create(const char *name)
-{
- struct thread_master *rv;
- struct rlimit limit;
-
- pthread_once(&init_once, &initializer);
-
- rv = XCALLOC(MTYPE_THREAD_MASTER, sizeof(struct thread_master));
-
- /* Initialize master mutex */
- pthread_mutex_init(&rv->mtx, NULL);
- pthread_cond_init(&rv->cancel_cond, NULL);
-
- /* Set name */
- name = name ? name : "default";
- rv->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
-
- /* Initialize I/O task data structures */
-
- /* Use configured limit if present, ulimit otherwise. */
- rv->fd_limit = frr_get_fd_limit();
- if (rv->fd_limit == 0) {
- getrlimit(RLIMIT_NOFILE, &limit);
- rv->fd_limit = (int)limit.rlim_cur;
- }
-
- rv->read = XCALLOC(MTYPE_THREAD_POLL,
- sizeof(struct thread *) * rv->fd_limit);
-
- rv->write = XCALLOC(MTYPE_THREAD_POLL,
- sizeof(struct thread *) * rv->fd_limit);
-
- char tmhashname[strlen(name) + 32];
- snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
- name);
- rv->cpu_record = hash_create_size(
- 8, (unsigned int (*)(const void *))cpu_record_hash_key,
- (bool (*)(const void *, const void *))cpu_record_hash_cmp,
- tmhashname);
-
- thread_list_init(&rv->event);
- thread_list_init(&rv->ready);
- thread_list_init(&rv->unuse);
- thread_timer_list_init(&rv->timer);
-
- /* Initialize thread_fetch() settings */
- rv->spin = true;
- rv->handle_signals = true;
-
- /* Set pthread owner, should be updated by actual owner */
- rv->owner = pthread_self();
- rv->cancel_req = list_new();
- rv->cancel_req->del = cancelreq_del;
- rv->canceled = true;
-
- /* Initialize pipe poker */
- pipe(rv->io_pipe);
- set_nonblocking(rv->io_pipe[0]);
- set_nonblocking(rv->io_pipe[1]);
-
- /* Initialize data structures for poll() */
- rv->handler.pfdsize = rv->fd_limit;
- rv->handler.pfdcount = 0;
- rv->handler.pfds = XCALLOC(MTYPE_THREAD_MASTER,
- sizeof(struct pollfd) * rv->handler.pfdsize);
- rv->handler.copy = XCALLOC(MTYPE_THREAD_MASTER,
- sizeof(struct pollfd) * rv->handler.pfdsize);
-
- /* add to list of threadmasters */
- frr_with_mutex (&masters_mtx) {
- if (!masters)
- masters = list_new();
-
- listnode_add(masters, rv);
- }
-
- return rv;
-}
-
-void thread_master_set_name(struct thread_master *master, const char *name)
-{
- frr_with_mutex (&master->mtx) {
- XFREE(MTYPE_THREAD_MASTER, master->name);
- master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
- }
-}
-
-#define THREAD_UNUSED_DEPTH 10
-
-/* Move thread to unuse list. */
-static void thread_add_unuse(struct thread_master *m, struct thread *thread)
-{
- pthread_mutex_t mtxc = thread->mtx;
-
- assert(m != NULL && thread != NULL);
-
- thread->hist->total_active--;
- memset(thread, 0, sizeof(struct thread));
- thread->type = THREAD_UNUSED;
-
- /* Restore the thread mutex context. */
- thread->mtx = mtxc;
-
- if (thread_list_count(&m->unuse) < THREAD_UNUSED_DEPTH) {
- thread_list_add_tail(&m->unuse, thread);
- return;
- }
-
- thread_free(m, thread);
-}
-
-/* Free all unused thread. */
-static void thread_list_free(struct thread_master *m,
- struct thread_list_head *list)
-{
- struct thread *t;
-
- while ((t = thread_list_pop(list)))
- thread_free(m, t);
-}
-
-static void thread_array_free(struct thread_master *m,
- struct thread **thread_array)
-{
- struct thread *t;
- int index;
-
- for (index = 0; index < m->fd_limit; ++index) {
- t = thread_array[index];
- if (t) {
- thread_array[index] = NULL;
- thread_free(m, t);
- }
- }
- XFREE(MTYPE_THREAD_POLL, thread_array);
-}
-
-/*
- * thread_master_free_unused
- *
- * As threads are finished with they are put on the
- * unuse list for later reuse.
- * If we are shutting down, Free up unused threads
- * So we can see if we forget to shut anything off
- */
-void thread_master_free_unused(struct thread_master *m)
-{
- frr_with_mutex (&m->mtx) {
- struct thread *t;
- while ((t = thread_list_pop(&m->unuse)))
- thread_free(m, t);
- }
-}
-
-/* Stop thread scheduler. */
-void thread_master_free(struct thread_master *m)
-{
- struct thread *t;
-
- frr_with_mutex (&masters_mtx) {
- listnode_delete(masters, m);
- if (masters->count == 0) {
- list_delete(&masters);
- }
- }
-
- thread_array_free(m, m->read);
- thread_array_free(m, m->write);
- while ((t = thread_timer_list_pop(&m->timer)))
- thread_free(m, t);
- thread_list_free(m, &m->event);
- thread_list_free(m, &m->ready);
- thread_list_free(m, &m->unuse);
- pthread_mutex_destroy(&m->mtx);
- pthread_cond_destroy(&m->cancel_cond);
- close(m->io_pipe[0]);
- close(m->io_pipe[1]);
- list_delete(&m->cancel_req);
- m->cancel_req = NULL;
-
- hash_clean(m->cpu_record, cpu_record_hash_free);
- hash_free(m->cpu_record);
- m->cpu_record = NULL;
-
- XFREE(MTYPE_THREAD_MASTER, m->name);
- XFREE(MTYPE_THREAD_MASTER, m->handler.pfds);
- XFREE(MTYPE_THREAD_MASTER, m->handler.copy);
- XFREE(MTYPE_THREAD_MASTER, m);
-}
-
-/* Return remain time in milliseconds. */
-unsigned long thread_timer_remain_msec(struct thread *thread)
-{
- int64_t remain;
-
- if (!thread_is_scheduled(thread))
- return 0;
-
- frr_with_mutex (&thread->mtx) {
- remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
- }
-
- return remain < 0 ? 0 : remain;
-}
-
-/* Return remain time in seconds. */
-unsigned long thread_timer_remain_second(struct thread *thread)
-{
- return thread_timer_remain_msec(thread) / 1000LL;
-}
-
-struct timeval thread_timer_remain(struct thread *thread)
-{
- struct timeval remain;
- frr_with_mutex (&thread->mtx) {
- monotime_until(&thread->u.sands, &remain);
- }
- return remain;
-}
-
-static int time_hhmmss(char *buf, int buf_size, long sec)
-{
- long hh;
- long mm;
- int wr;
-
- assert(buf_size >= 8);
-
- hh = sec / 3600;
- sec %= 3600;
- mm = sec / 60;
- sec %= 60;
-
- wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);
-
- return wr != 8;
-}
-
-char *thread_timer_to_hhmmss(char *buf, int buf_size,
- struct thread *t_timer)
-{
- if (t_timer) {
- time_hhmmss(buf, buf_size,
- thread_timer_remain_second(t_timer));
- } else {
- snprintf(buf, buf_size, "--:--:--");
- }
- return buf;
-}
-
-/* Get new thread. */
-static struct thread *thread_get(struct thread_master *m, uint8_t type,
- void (*func)(struct thread *), void *arg,
- const struct xref_threadsched *xref)
-{
- struct thread *thread = thread_list_pop(&m->unuse);
- struct cpu_thread_history tmp;
-
- if (!thread) {
- thread = XCALLOC(MTYPE_THREAD, sizeof(struct thread));
- /* mutex only needs to be initialized at struct creation. */
- pthread_mutex_init(&thread->mtx, NULL);
- m->alloc++;
- }
-
- thread->type = type;
- thread->add_type = type;
- thread->master = m;
- thread->arg = arg;
- thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
- thread->ref = NULL;
- thread->ignore_timer_late = false;
-
- /*
- * So if the passed in funcname is not what we have
- * stored that means the thread->hist needs to be
- * updated. We keep the last one around in unused
- * under the assumption that we are probably
- * going to immediately allocate the same
- * type of thread.
- * This hopefully saves us some serious
- * hash_get lookups.
- */
- if ((thread->xref && thread->xref->funcname != xref->funcname)
- || thread->func != func) {
- tmp.func = func;
- tmp.funcname = xref->funcname;
- thread->hist =
- hash_get(m->cpu_record, &tmp,
- (void *(*)(void *))cpu_record_hash_alloc);
- }
- thread->hist->total_active++;
- thread->func = func;
- thread->xref = xref;
-
- return thread;
-}
-
-static void thread_free(struct thread_master *master, struct thread *thread)
-{
- /* Update statistics. */
- assert(master->alloc > 0);
- master->alloc--;
-
- /* Free allocated resources. */
- pthread_mutex_destroy(&thread->mtx);
- XFREE(MTYPE_THREAD, thread);
-}
-
-static int fd_poll(struct thread_master *m, const struct timeval *timer_wait,
- bool *eintr_p)
-{
- sigset_t origsigs;
- unsigned char trash[64];
- nfds_t count = m->handler.copycount;
-
- /*
- * If timer_wait is null here, that means poll() should block
- * indefinitely, unless the thread_master has overridden it by setting
- * ->selectpoll_timeout.
- *
- * If the value is positive, it specifies the maximum number of
- * milliseconds to wait. If the timeout is -1, it specifies that
- * we should never wait and always return immediately even if no
- * event is detected. If the value is zero, the behavior is default.
- */
- int timeout = -1;
-
- /* number of file descriptors with events */
- int num;
-
- if (timer_wait != NULL
- && m->selectpoll_timeout == 0) // use the default value
- timeout = (timer_wait->tv_sec * 1000)
- + (timer_wait->tv_usec / 1000);
- else if (m->selectpoll_timeout > 0) // use the user's timeout
- timeout = m->selectpoll_timeout;
- else if (m->selectpoll_timeout
- < 0) // effect a poll (return immediately)
- timeout = 0;
-
- zlog_tls_buffer_flush();
- rcu_read_unlock();
- rcu_assert_read_unlocked();
-
- /* add poll pipe poker */
- assert(count + 1 < m->handler.pfdsize);
- m->handler.copy[count].fd = m->io_pipe[0];
- m->handler.copy[count].events = POLLIN;
- m->handler.copy[count].revents = 0x00;
-
- /* We need to deal with a signal-handling race here: we
- * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
- * that may arrive just before we enter poll(). We will block the
- * key signals, then check whether any have arrived - if so, we return
- * before calling poll(). If not, we'll re-enable the signals
- * in the ppoll() call.
- */
-
- sigemptyset(&origsigs);
- if (m->handle_signals) {
- /* Main pthread that handles the app signals */
- if (frr_sigevent_check(&origsigs)) {
- /* Signal to process - restore signal mask and return */
- pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
- num = -1;
- *eintr_p = true;
- goto done;
- }
- } else {
- /* Don't make any changes for the non-main pthreads */
- pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
- }
-
-#if defined(HAVE_PPOLL)
- struct timespec ts, *tsp;
-
- if (timeout >= 0) {
- ts.tv_sec = timeout / 1000;
- ts.tv_nsec = (timeout % 1000) * 1000000;
- tsp = &ts;
- } else
- tsp = NULL;
-
- num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
- pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
-#else
- /* Not ideal - there is a race after we restore the signal mask */
- pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
- num = poll(m->handler.copy, count + 1, timeout);
-#endif
-
-done:
-
- if (num < 0 && errno == EINTR)
- *eintr_p = true;
-
- if (num > 0 && m->handler.copy[count].revents != 0 && num--)
- while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
- ;
-
- rcu_read_lock();
-
- return num;
-}
-
-/* Add new read thread. */
-void _thread_add_read_write(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct thread *), void *arg, int fd,
- struct thread **t_ptr)
-{
- int dir = xref->thread_type;
- struct thread *thread = NULL;
- struct thread **thread_array;
-
- if (dir == THREAD_READ)
- frrtrace(9, frr_libfrr, schedule_read, m,
- xref->funcname, xref->xref.file, xref->xref.line,
- t_ptr, fd, 0, arg, 0);
- else
- frrtrace(9, frr_libfrr, schedule_write, m,
- xref->funcname, xref->xref.file, xref->xref.line,
- t_ptr, fd, 0, arg, 0);
-
- assert(fd >= 0);
- if (fd >= m->fd_limit)
- assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
-
- frr_with_mutex (&m->mtx) {
- if (t_ptr && *t_ptr)
- // thread is already scheduled; don't reschedule
- break;
-
- /* default to a new pollfd */
- nfds_t queuepos = m->handler.pfdcount;
-
- if (dir == THREAD_READ)
- thread_array = m->read;
- else
- thread_array = m->write;
-
- /* if we already have a pollfd for our file descriptor, find and
- * use it */
- for (nfds_t i = 0; i < m->handler.pfdcount; i++)
- if (m->handler.pfds[i].fd == fd) {
- queuepos = i;
-
-#ifdef DEV_BUILD
- /*
- * What happens if we have a thread already
- * created for this event?
- */
- if (thread_array[fd])
- assert(!"Thread already scheduled for file descriptor");
-#endif
- break;
- }
-
- /* make sure we have room for this fd + pipe poker fd */
- assert(queuepos + 1 < m->handler.pfdsize);
-
- thread = thread_get(m, dir, func, arg, xref);
-
- m->handler.pfds[queuepos].fd = fd;
- m->handler.pfds[queuepos].events |=
- (dir == THREAD_READ ? POLLIN : POLLOUT);
-
- if (queuepos == m->handler.pfdcount)
- m->handler.pfdcount++;
-
- if (thread) {
- frr_with_mutex (&thread->mtx) {
- thread->u.fd = fd;
- thread_array[thread->u.fd] = thread;
- }
-
- if (t_ptr) {
- *t_ptr = thread;
- thread->ref = t_ptr;
- }
- }
-
- AWAKEN(m);
- }
-}
-
-static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct thread *), void *arg,
- struct timeval *time_relative,
- struct thread **t_ptr)
-{
- struct thread *thread;
- struct timeval t;
-
- assert(m != NULL);
-
- assert(time_relative);
-
- frrtrace(9, frr_libfrr, schedule_timer, m,
- xref->funcname, xref->xref.file, xref->xref.line,
- t_ptr, 0, 0, arg, (long)time_relative->tv_sec);
-
- /* Compute expiration/deadline time. */
- monotime(&t);
- timeradd(&t, time_relative, &t);
-
- frr_with_mutex (&m->mtx) {
- if (t_ptr && *t_ptr)
- /* thread is already scheduled; don't reschedule */
- return;
-
- thread = thread_get(m, THREAD_TIMER, func, arg, xref);
-
- frr_with_mutex (&thread->mtx) {
- thread->u.sands = t;
- thread_timer_list_add(&m->timer, thread);
- if (t_ptr) {
- *t_ptr = thread;
- thread->ref = t_ptr;
- }
- }
-
- /* The timer list is sorted - if this new timer
- * might change the time we'll wait for, give the pthread
- * a chance to re-compute.
- */
- if (thread_timer_list_first(&m->timer) == thread)
- AWAKEN(m);
- }
-#define ONEYEAR2SEC (60 * 60 * 24 * 365)
- if (time_relative->tv_sec > ONEYEAR2SEC)
- flog_err(
- EC_LIB_TIMER_TOO_LONG,
- "Timer: %pTHD is created with an expiration that is greater than 1 year",
- thread);
-}
-
-
-/* Add timer event thread. */
-void _thread_add_timer(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct thread *),
- void *arg, long timer, struct thread **t_ptr)
-{
- struct timeval trel;
-
- assert(m != NULL);
-
- trel.tv_sec = timer;
- trel.tv_usec = 0;
-
- _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
-}
-
-/* Add timer event thread with "millisecond" resolution */
-void _thread_add_timer_msec(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct thread *), void *arg,
- long timer, struct thread **t_ptr)
-{
- struct timeval trel;
-
- assert(m != NULL);
-
- trel.tv_sec = timer / 1000;
- trel.tv_usec = 1000 * (timer % 1000);
-
- _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
-}
-
-/* Add timer event thread with "timeval" resolution */
-void _thread_add_timer_tv(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct thread *), void *arg,
- struct timeval *tv, struct thread **t_ptr)
-{
- _thread_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
-}
-
-/* Add simple event thread. */
-void _thread_add_event(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct thread *),
- void *arg, int val, struct thread **t_ptr)
-{
- struct thread *thread = NULL;
-
- frrtrace(9, frr_libfrr, schedule_event, m,
- xref->funcname, xref->xref.file, xref->xref.line,
- t_ptr, 0, val, arg, 0);
-
- assert(m != NULL);
-
- frr_with_mutex (&m->mtx) {
- if (t_ptr && *t_ptr)
- /* thread is already scheduled; don't reschedule */
- break;
-
- thread = thread_get(m, THREAD_EVENT, func, arg, xref);
- frr_with_mutex (&thread->mtx) {
- thread->u.val = val;
- thread_list_add_tail(&m->event, thread);
- }
-
- if (t_ptr) {
- *t_ptr = thread;
- thread->ref = t_ptr;
- }
-
- AWAKEN(m);
- }
-}
-
-/* Thread cancellation ------------------------------------------------------ */
-
-/**
- * NOT's out the .events field of pollfd corresponding to the given file
- * descriptor. The event to be NOT'd is passed in the 'state' parameter.
- *
- * This needs to happen for both copies of pollfd's. See 'thread_fetch'
- * implementation for details.
- *
- * @param master
- * @param fd
- * @param state the event to cancel. One or more (OR'd together) of the
- * following:
- * - POLLIN
- * - POLLOUT
- */
-static void thread_cancel_rw(struct thread_master *master, int fd, short state,
- int idx_hint)
-{
- bool found = false;
-
- /* find the index of corresponding pollfd */
- nfds_t i;
-
- /* Cancel POLLHUP too just in case some bozo set it */
- state |= POLLHUP;
-
- /* Some callers know the index of the pfd already */
- if (idx_hint >= 0) {
- i = idx_hint;
- found = true;
- } else {
- /* Have to look for the fd in the pfd array */
- for (i = 0; i < master->handler.pfdcount; i++)
- if (master->handler.pfds[i].fd == fd) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- zlog_debug(
- "[!] Received cancellation request for nonexistent rw job");
- zlog_debug("[!] threadmaster: %s | fd: %d",
- master->name ? master->name : "", fd);
- return;
- }
-
- /* NOT out event. */
- master->handler.pfds[i].events &= ~(state);
-
- /* If all events are canceled, delete / resize the pollfd array. */
- if (master->handler.pfds[i].events == 0) {
- memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
- (master->handler.pfdcount - i - 1)
- * sizeof(struct pollfd));
- master->handler.pfdcount--;
- master->handler.pfds[master->handler.pfdcount].fd = 0;
- master->handler.pfds[master->handler.pfdcount].events = 0;
- }
-
- /* If we have the same pollfd in the copy, perform the same operations,
- * otherwise return. */
- if (i >= master->handler.copycount)
- return;
-
- master->handler.copy[i].events &= ~(state);
-
- if (master->handler.copy[i].events == 0) {
- memmove(master->handler.copy + i, master->handler.copy + i + 1,
- (master->handler.copycount - i - 1)
- * sizeof(struct pollfd));
- master->handler.copycount--;
- master->handler.copy[master->handler.copycount].fd = 0;
- master->handler.copy[master->handler.copycount].events = 0;
- }
-}
-
-/*
- * Process task cancellation given a task argument: iterate through the
- * various lists of tasks, looking for any that match the argument.
- */
-static void cancel_arg_helper(struct thread_master *master,
- const struct cancel_req *cr)
-{
- struct thread *t;
- nfds_t i;
- int fd;
- struct pollfd *pfd;
-
- /* We're only processing arg-based cancellations here. */
- if (cr->eventobj == NULL)
- return;
-
- /* First process the ready lists. */
- frr_each_safe(thread_list, &master->event, t) {
- if (t->arg != cr->eventobj)
- continue;
- thread_list_del(&master->event, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
-
- frr_each_safe(thread_list, &master->ready, t) {
- if (t->arg != cr->eventobj)
- continue;
- thread_list_del(&master->ready, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
-
- /* If requested, stop here and ignore io and timers */
- if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
- return;
-
- /* Check the io tasks */
- for (i = 0; i < master->handler.pfdcount;) {
- pfd = master->handler.pfds + i;
-
- if (pfd->events & POLLIN)
- t = master->read[pfd->fd];
- else
- t = master->write[pfd->fd];
-
- if (t && t->arg == cr->eventobj) {
- fd = pfd->fd;
-
- /* Found a match to cancel: clean up fd arrays */
- thread_cancel_rw(master, pfd->fd, pfd->events, i);
-
- /* Clean up thread arrays */
- master->read[fd] = NULL;
- master->write[fd] = NULL;
-
- /* Clear caller's ref */
- if (t->ref)
- *t->ref = NULL;
-
- thread_add_unuse(master, t);
-
- /* Don't increment 'i' since the cancellation will have
- * removed the entry from the pfd array
- */
- } else
- i++;
- }
-
- /* Check the timer tasks */
- t = thread_timer_list_first(&master->timer);
- while (t) {
- struct thread *t_next;
-
- t_next = thread_timer_list_next(&master->timer, t);
-
- if (t->arg == cr->eventobj) {
- thread_timer_list_del(&master->timer, t);
- if (t->ref)
- *t->ref = NULL;
- thread_add_unuse(master, t);
- }
-
- t = t_next;
- }
-}
-
-/**
- * Process cancellation requests.
- *
- * This may only be run from the pthread which owns the thread_master.
- *
- * @param master the thread master to process
- * @REQUIRE master->mtx
- */
-static void do_thread_cancel(struct thread_master *master)
-{
- struct thread_list_head *list = NULL;
- struct thread **thread_array = NULL;
- struct thread *thread;
- struct cancel_req *cr;
- struct listnode *ln;
-
- for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
- /*
- * If this is an event object cancellation, search
- * through task lists deleting any tasks which have the
- * specified argument - use this handy helper function.
- */
- if (cr->eventobj) {
- cancel_arg_helper(master, cr);
- continue;
- }
-
- /*
- * The pointer varies depending on whether the cancellation
- * request was made asynchronously or not. If it was, we
- * need to check whether the thread even exists anymore
- * before cancelling it.
- */
- thread = (cr->thread) ? cr->thread : *cr->threadref;
-
- if (!thread)
- continue;
-
- list = NULL;
- thread_array = NULL;
-
- /* Determine the appropriate queue to cancel the thread from */
- switch (thread->type) {
- case THREAD_READ:
- thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
- thread_array = master->read;
- break;
- case THREAD_WRITE:
- thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
- thread_array = master->write;
- break;
- case THREAD_TIMER:
- thread_timer_list_del(&master->timer, thread);
- break;
- case THREAD_EVENT:
- list = &master->event;
- break;
- case THREAD_READY:
- list = &master->ready;
- break;
- default:
- continue;
- break;
- }
-
- if (list) {
- thread_list_del(list, thread);
- } else if (thread_array) {
- thread_array[thread->u.fd] = NULL;
- }
-
- if (thread->ref)
- *thread->ref = NULL;
-
- thread_add_unuse(thread->master, thread);
- }
-
- /* Delete and free all cancellation requests */
- if (master->cancel_req)
- list_delete_all_node(master->cancel_req);
-
- /* Wake up any threads which may be blocked in thread_cancel_async() */
- master->canceled = true;
- pthread_cond_broadcast(&master->cancel_cond);
-}
-
-/*
- * Helper function used for multiple flavors of arg-based cancellation.
- */
-static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
-{
- struct cancel_req *cr;
-
- assert(m->owner == pthread_self());
-
- /* Only worth anything if caller supplies an arg. */
- if (arg == NULL)
- return;
-
- cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
-
- cr->flags = flags;
-
- frr_with_mutex (&m->mtx) {
- cr->eventobj = arg;
- listnode_add(m->cancel_req, cr);
- do_thread_cancel(m);
- }
-}
-
-/**
- * Cancel any events which have the specified argument.
- *
- * MT-Unsafe
- *
- * @param m the thread_master to cancel from
- * @param arg the argument passed when creating the event
- */
-void thread_cancel_event(struct thread_master *master, void *arg)
-{
- cancel_event_helper(master, arg, 0);
-}
-
-/*
- * Cancel ready tasks with an arg matching 'arg'
- *
- * MT-Unsafe
- *
- * @param m the thread_master to cancel from
- * @param arg the argument passed when creating the event
- */
-void thread_cancel_event_ready(struct thread_master *m, void *arg)
-{
-
- /* Only cancel ready/event tasks */
- cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
-}
-
-/**
- * Cancel a specific task.
- *
- * MT-Unsafe
- *
- * @param thread task to cancel
- */
-void thread_cancel(struct thread **thread)
-{
- struct thread_master *master;
-
- if (thread == NULL || *thread == NULL)
- return;
-
- master = (*thread)->master;
-
- frrtrace(9, frr_libfrr, thread_cancel, master,
- (*thread)->xref->funcname, (*thread)->xref->xref.file,
- (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
- (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
-
- assert(master->owner == pthread_self());
-
- frr_with_mutex (&master->mtx) {
- struct cancel_req *cr =
- XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
- cr->thread = *thread;
- listnode_add(master->cancel_req, cr);
- do_thread_cancel(master);
- }
-
- *thread = NULL;
-}
-
-/**
- * Asynchronous cancellation.
- *
- * Called with either a struct thread ** or void * to an event argument,
- * this function posts the correct cancellation request and blocks until it is
- * serviced.
- *
- * If the thread is currently running, execution blocks until it completes.
- *
- * The last two parameters are mutually exclusive, i.e. if you pass one the
- * other must be NULL.
- *
- * When the cancellation procedure executes on the target thread_master, the
- * thread * provided is checked for nullity. If it is null, the thread is
- * assumed to no longer exist and the cancellation request is a no-op. Thus
- * users of this API must pass a back-reference when scheduling the original
- * task.
- *
- * MT-Safe
- *
- * @param master the thread master with the relevant event / task
- * @param thread pointer to thread to cancel
- * @param eventobj the event
- */
-void thread_cancel_async(struct thread_master *master, struct thread **thread,
- void *eventobj)
-{
- assert(!(thread && eventobj) && (thread || eventobj));
-
- if (thread && *thread)
- frrtrace(9, frr_libfrr, thread_cancel_async, master,
- (*thread)->xref->funcname, (*thread)->xref->xref.file,
- (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
- (*thread)->u.val, (*thread)->arg,
- (*thread)->u.sands.tv_sec);
- else
- frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
- 0, NULL, 0, 0, eventobj, 0);
-
- assert(master->owner != pthread_self());
-
- frr_with_mutex (&master->mtx) {
- master->canceled = false;
-
- if (thread) {
- struct cancel_req *cr =
- XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
- cr->threadref = thread;
- listnode_add(master->cancel_req, cr);
- } else if (eventobj) {
- struct cancel_req *cr =
- XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
- cr->eventobj = eventobj;
- listnode_add(master->cancel_req, cr);
- }
- AWAKEN(master);
-
- while (!master->canceled)
- pthread_cond_wait(&master->cancel_cond, &master->mtx);
- }
-
- if (thread)
- *thread = NULL;
-}
-/* ------------------------------------------------------------------------- */
-
-static struct timeval *thread_timer_wait(struct thread_timer_list_head *timers,
- struct timeval *timer_val)
-{
- if (!thread_timer_list_count(timers))
- return NULL;
-
- struct thread *next_timer = thread_timer_list_first(timers);
- monotime_until(&next_timer->u.sands, timer_val);
- return timer_val;
-}
-
-static struct thread *thread_run(struct thread_master *m, struct thread *thread,
- struct thread *fetch)
-{
- *fetch = *thread;
- thread_add_unuse(m, thread);
- return fetch;
-}
-
-static int thread_process_io_helper(struct thread_master *m,
- struct thread *thread, short state,
- short actual_state, int pos)
-{
- struct thread **thread_array;
-
- /*
- * poll() clears the .events field, but the pollfd array we
- * pass to poll() is a copy of the one used to schedule threads.
- * We need to synchronize state between the two here by applying
- * the same changes poll() made on the copy of the "real" pollfd
- * array.
- *
- * This cleans up a possible infinite loop where we refuse
- * to respond to a poll event but poll is insistent that
- * we should.
- */
- m->handler.pfds[pos].events &= ~(state);
-
- if (!thread) {
- if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
- flog_err(EC_LIB_NO_THREAD,
- "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
- m->handler.pfds[pos].fd, actual_state);
- return 0;
- }
-
- if (thread->type == THREAD_READ)
- thread_array = m->read;
- else
- thread_array = m->write;
-
- thread_array[thread->u.fd] = NULL;
- thread_list_add_tail(&m->ready, thread);
- thread->type = THREAD_READY;
-
- return 1;
-}
-
-/**
- * Process I/O events.
- *
- * Walks through file descriptor array looking for those pollfds whose .revents
- * field has something interesting. Deletes any invalid file descriptors.
- *
- * @param m the thread master
- * @param num the number of active file descriptors (return value of poll())
- */
-static void thread_process_io(struct thread_master *m, unsigned int num)
-{
- unsigned int ready = 0;
- struct pollfd *pfds = m->handler.copy;
-
- for (nfds_t i = 0; i < m->handler.copycount && ready < num; ++i) {
- /* no event for current fd? immediately continue */
- if (pfds[i].revents == 0)
- continue;
-
- ready++;
-
- /*
- * Unless someone has called thread_cancel from another
- * pthread, the only thing that could have changed in
- * m->handler.pfds while we were asleep is the .events
- * field in a given pollfd. Barring thread_cancel() that
- * value should be a superset of the values we have in our
- * copy, so there's no need to update it. Similarily,
- * barring deletion, the fd should still be a valid index
- * into the master's pfds.
- *
- * We are including POLLERR here to do a READ event
- * this is because the read should fail and the
- * read function should handle it appropriately
- */
- if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
- thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN,
- pfds[i].revents, i);
- }
- if (pfds[i].revents & POLLOUT)
- thread_process_io_helper(m, m->write[pfds[i].fd],
- POLLOUT, pfds[i].revents, i);
-
- /* if one of our file descriptors is garbage, remove the same
- * from
- * both pfds + update sizes and index */
- if (pfds[i].revents & POLLNVAL) {
- memmove(m->handler.pfds + i, m->handler.pfds + i + 1,
- (m->handler.pfdcount - i - 1)
- * sizeof(struct pollfd));
- m->handler.pfdcount--;
- m->handler.pfds[m->handler.pfdcount].fd = 0;
- m->handler.pfds[m->handler.pfdcount].events = 0;
-
- memmove(pfds + i, pfds + i + 1,
- (m->handler.copycount - i - 1)
- * sizeof(struct pollfd));
- m->handler.copycount--;
- m->handler.copy[m->handler.copycount].fd = 0;
- m->handler.copy[m->handler.copycount].events = 0;
-
- i--;
- }
- }
-}
-
-/* Add all timers that have popped to the ready list. */
-static unsigned int thread_process_timers(struct thread_master *m,
- struct timeval *timenow)
-{
- struct timeval prev = *timenow;
- bool displayed = false;
- struct thread *thread;
- unsigned int ready = 0;
-
- while ((thread = thread_timer_list_first(&m->timer))) {
- if (timercmp(timenow, &thread->u.sands, <))
- break;
- prev = thread->u.sands;
- prev.tv_sec += 4;
- /*
- * If the timer would have popped 4 seconds in the
- * past then we are in a situation where we are
- * really getting behind on handling of events.
- * Let's log it and do the right thing with it.
- */
- if (timercmp(timenow, &prev, >)) {
- atomic_fetch_add_explicit(
- &thread->hist->total_starv_warn, 1,
- memory_order_seq_cst);
- if (!displayed && !thread->ignore_timer_late) {
- flog_warn(
- EC_LIB_STARVE_THREAD,
- "Thread Starvation: %pTHD was scheduled to pop greater than 4s ago",
- thread);
- displayed = true;
- }
- }
-
- thread_timer_list_pop(&m->timer);
- thread->type = THREAD_READY;
- thread_list_add_tail(&m->ready, thread);
- ready++;
- }
-
- return ready;
-}
-
-/* process a list en masse, e.g. for event thread lists */
-static unsigned int thread_process(struct thread_list_head *list)
-{
- struct thread *thread;
- unsigned int ready = 0;
-
- while ((thread = thread_list_pop(list))) {
- thread->type = THREAD_READY;
- thread_list_add_tail(&thread->master->ready, thread);
- ready++;
- }
- return ready;
-}
-
-
-/* Fetch next ready thread. */
-struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
-{
- struct thread *thread = NULL;
- struct timeval now;
- struct timeval zerotime = {0, 0};
- struct timeval tv;
- struct timeval *tw = NULL;
- bool eintr_p = false;
- int num = 0;
-
- do {
- /* Handle signals if any */
- if (m->handle_signals)
- frr_sigevent_process();
-
- pthread_mutex_lock(&m->mtx);
-
- /* Process any pending cancellation requests */
- do_thread_cancel(m);
-
- /*
- * Attempt to flush ready queue before going into poll().
- * This is performance-critical. Think twice before modifying.
- */
- if ((thread = thread_list_pop(&m->ready))) {
- fetch = thread_run(m, thread, fetch);
- if (fetch->ref)
- *fetch->ref = NULL;
- pthread_mutex_unlock(&m->mtx);
- if (!m->ready_run_loop)
- GETRUSAGE(&m->last_getrusage);
- m->ready_run_loop = true;
- break;
- }
-
- m->ready_run_loop = false;
- /* otherwise, tick through scheduling sequence */
-
- /*
- * Post events to ready queue. This must come before the
- * following block since events should occur immediately
- */
- thread_process(&m->event);
-
- /*
- * If there are no tasks on the ready queue, we will poll()
- * until a timer expires or we receive I/O, whichever comes
- * first. The strategy for doing this is:
- *
- * - If there are events pending, set the poll() timeout to zero
- * - If there are no events pending, but there are timers
- * pending, set the timeout to the smallest remaining time on
- * any timer.
- * - If there are neither timers nor events pending, but there
- * are file descriptors pending, block indefinitely in poll()
- * - If nothing is pending, it's time for the application to die
- *
- * In every case except the last, we need to hit poll() at least
- * once per loop to avoid starvation by events
- */
- if (!thread_list_count(&m->ready))
- tw = thread_timer_wait(&m->timer, &tv);
-
- if (thread_list_count(&m->ready) ||
- (tw && !timercmp(tw, &zerotime, >)))
- tw = &zerotime;
-
- if (!tw && m->handler.pfdcount == 0) { /* die */
- pthread_mutex_unlock(&m->mtx);
- fetch = NULL;
- break;
- }
-
- /*
- * Copy pollfd array + # active pollfds in it. Not necessary to
- * copy the array size as this is fixed.
- */
- m->handler.copycount = m->handler.pfdcount;
- memcpy(m->handler.copy, m->handler.pfds,
- m->handler.copycount * sizeof(struct pollfd));
-
- pthread_mutex_unlock(&m->mtx);
- {
- eintr_p = false;
- num = fd_poll(m, tw, &eintr_p);
- }
- pthread_mutex_lock(&m->mtx);
-
- /* Handle any errors received in poll() */
- if (num < 0) {
- if (eintr_p) {
- pthread_mutex_unlock(&m->mtx);
- /* loop around to signal handler */
- continue;
- }
-
- /* else die */
- flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s",
- safe_strerror(errno));
- pthread_mutex_unlock(&m->mtx);
- fetch = NULL;
- break;
- }
-
- /* Post timers to ready queue. */
- monotime(&now);
- thread_process_timers(m, &now);
-
- /* Post I/O to ready queue. */
- if (num > 0)
- thread_process_io(m, num);
-
- pthread_mutex_unlock(&m->mtx);
-
- } while (!thread && m->spin);
-
- return fetch;
-}
-
-static unsigned long timeval_elapsed(struct timeval a, struct timeval b)
-{
- return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
- + (a.tv_usec - b.tv_usec));
-}
-
-unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
- unsigned long *cputime)
-{
-#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
-
-#ifdef __FreeBSD__
- /*
- * FreeBSD appears to have an issue when calling clock_gettime
- * with CLOCK_THREAD_CPUTIME_ID really close to each other
- * occassionally the now time will be before the start time.
- * This is not good and FRR is ending up with CPU HOG's
- * when the subtraction wraps to very large numbers
- *
- * What we are going to do here is cheat a little bit
- * and notice that this is a problem and just correct
- * it so that it is impossible to happen
- */
- if (start->cpu.tv_sec == now->cpu.tv_sec &&
- start->cpu.tv_nsec > now->cpu.tv_nsec)
- now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
- else if (start->cpu.tv_sec > now->cpu.tv_sec) {
- now->cpu.tv_sec = start->cpu.tv_sec;
- now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
- }
-#endif
- *cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
- + (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
-#else
- /* This is 'user + sys' time. */
- *cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
- + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
-#endif
- return timeval_elapsed(now->real, start->real);
-}
-
-/* We should aim to yield after yield milliseconds, which defaults
- to THREAD_YIELD_TIME_SLOT .
- Note: we are using real (wall clock) time for this calculation.
- It could be argued that CPU time may make more sense in certain
- contexts. The things to consider are whether the thread may have
- blocked (in which case wall time increases, but CPU time does not),
- or whether the system is heavily loaded with other processes competing
- for CPU time. On balance, wall clock time seems to make sense.
- Plus it has the added benefit that gettimeofday should be faster
- than calling getrusage. */
-int thread_should_yield(struct thread *thread)
-{
- int result;
- frr_with_mutex (&thread->mtx) {
- result = monotime_since(&thread->real, NULL)
- > (int64_t)thread->yield;
- }
- return result;
-}
-
-void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
-{
- frr_with_mutex (&thread->mtx) {
- thread->yield = yield_time;
- }
-}
-
-void thread_getrusage(RUSAGE_T *r)
-{
- monotime(&r->real);
- if (!cputime_enabled) {
- memset(&r->cpu, 0, sizeof(r->cpu));
- return;
- }
-
-#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
- /* not currently implemented in Linux's vDSO, but maybe at some point
- * in the future?
- */
- clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
-#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
-#if defined RUSAGE_THREAD
-#define FRR_RUSAGE RUSAGE_THREAD
-#else
-#define FRR_RUSAGE RUSAGE_SELF
-#endif
- getrusage(FRR_RUSAGE, &(r->cpu));
-#endif
-}
-
-/*
- * Call a thread.
- *
- * This function will atomically update the thread's usage history. At present
- * this is the only spot where usage history is written. Nevertheless the code
- * has been written such that the introduction of writers in the future should
- * not need to update it provided the writers atomically perform only the
- * operations done here, i.e. updating the total and maximum times. In
- * particular, the maximum real and cpu times must be monotonically increasing
- * or this code is not correct.
- */
-void thread_call(struct thread *thread)
-{
- RUSAGE_T before, after;
-
- /* if the thread being called is the CLI, it may change cputime_enabled
- * ("service cputime-stats" command), which can result in nonsensical
- * and very confusing warnings
- */
- bool cputime_enabled_here = cputime_enabled;
-
- if (thread->master->ready_run_loop)
- before = thread->master->last_getrusage;
- else
- GETRUSAGE(&before);
-
- thread->real = before.real;
-
- frrtrace(9, frr_libfrr, thread_call, thread->master,
- thread->xref->funcname, thread->xref->xref.file,
- thread->xref->xref.line, NULL, thread->u.fd,
- thread->u.val, thread->arg, thread->u.sands.tv_sec);
-
- pthread_setspecific(thread_current, thread);
- (*thread->func)(thread);
- pthread_setspecific(thread_current, NULL);
-
- GETRUSAGE(&after);
- thread->master->last_getrusage = after;
-
- unsigned long walltime, cputime;
- unsigned long exp;
-
- walltime = thread_consumed_time(&after, &before, &cputime);
-
- /* update walltime */
- atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
- memory_order_seq_cst);
- exp = atomic_load_explicit(&thread->hist->real.max,
- memory_order_seq_cst);
- while (exp < walltime
- && !atomic_compare_exchange_weak_explicit(
- &thread->hist->real.max, &exp, walltime,
- memory_order_seq_cst, memory_order_seq_cst))
- ;
-
- if (cputime_enabled_here && cputime_enabled) {
- /* update cputime */
- atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
- memory_order_seq_cst);
- exp = atomic_load_explicit(&thread->hist->cpu.max,
- memory_order_seq_cst);
- while (exp < cputime
- && !atomic_compare_exchange_weak_explicit(
- &thread->hist->cpu.max, &exp, cputime,
- memory_order_seq_cst, memory_order_seq_cst))
- ;
- }
-
- atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
- memory_order_seq_cst);
- atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
- memory_order_seq_cst);
-
- if (cputime_enabled_here && cputime_enabled && cputime_threshold
- && cputime > cputime_threshold) {
- /*
- * We have a CPU Hog on our hands. The time FRR has spent
- * doing actual work (not sleeping) is greater than 5 seconds.
- * Whinge about it now, so we're aware this is yet another task
- * to fix.
- */
- atomic_fetch_add_explicit(&thread->hist->total_cpu_warn,
- 1, memory_order_seq_cst);
- flog_warn(
- EC_LIB_SLOW_THREAD_CPU,
- "CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
- thread->xref->funcname, (unsigned long)thread->func,
- walltime / 1000, cputime / 1000);
-
- } else if (walltime_threshold && walltime > walltime_threshold) {
- /*
- * The runtime for a task is greater than 5 seconds, but the
- * cpu time is under 5 seconds. Let's whine about this because
- * this could imply some sort of scheduling issue.
- */
- atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
- 1, memory_order_seq_cst);
- flog_warn(
- EC_LIB_SLOW_THREAD_WALL,
- "STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
- thread->xref->funcname, (unsigned long)thread->func,
- walltime / 1000, cputime / 1000);
- }
-}
-
-/* Execute thread */
-void _thread_execute(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct thread *),
- void *arg, int val)
-{
- struct thread *thread;
-
- /* Get or allocate new thread to execute. */
- frr_with_mutex (&m->mtx) {
- thread = thread_get(m, THREAD_EVENT, func, arg, xref);
-
- /* Set its event value. */
- frr_with_mutex (&thread->mtx) {
- thread->add_type = THREAD_EXECUTE;
- thread->u.val = val;
- thread->ref = &thread;
- }
- }
-
- /* Execute thread doing all accounting. */
- thread_call(thread);
-
- /* Give back or free thread. */
- thread_add_unuse(m, thread);
-}
-
-/* Debug signal mask - if 'sigs' is NULL, use current effective mask. */
-void debug_signals(const sigset_t *sigs)
-{
- int i, found;
- sigset_t tmpsigs;
- char buf[300];
-
- /*
- * We're only looking at the non-realtime signals here, so we need
- * some limit value. Platform differences mean at some point we just
- * need to pick a reasonable value.
- */
-#if defined SIGRTMIN
-# define LAST_SIGNAL SIGRTMIN
-#else
-# define LAST_SIGNAL 32
-#endif
-
-
- if (sigs == NULL) {
- sigemptyset(&tmpsigs);
- pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs);
- sigs = &tmpsigs;
- }
-
- found = 0;
- buf[0] = '\0';
-
- for (i = 0; i < LAST_SIGNAL; i++) {
- char tmp[20];
-
- if (sigismember(sigs, i) > 0) {
- if (found > 0)
- strlcat(buf, ",", sizeof(buf));
- snprintf(tmp, sizeof(tmp), "%d", i);
- strlcat(buf, tmp, sizeof(buf));
- found++;
- }
- }
-
- if (found == 0)
- snprintf(buf, sizeof(buf), "<none>");
-
- zlog_debug("%s: %s", __func__, buf);
-}
-
-static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
- const struct thread *thread)
-{
- static const char * const types[] = {
- [THREAD_READ] = "read",
- [THREAD_WRITE] = "write",
- [THREAD_TIMER] = "timer",
- [THREAD_EVENT] = "event",
- [THREAD_READY] = "ready",
- [THREAD_UNUSED] = "unused",
- [THREAD_EXECUTE] = "exec",
- };
- ssize_t rv = 0;
- char info[16] = "";
-
- if (!thread)
- return bputs(buf, "{(thread *)NULL}");
-
- rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);
-
- if (thread->type < array_size(types) && types[thread->type])
- rv += bprintfrr(buf, " %-6s", types[thread->type]);
- else
- rv += bprintfrr(buf, " INVALID(%u)", thread->type);
-
- switch (thread->type) {
- case THREAD_READ:
- case THREAD_WRITE:
- snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
- break;
-
- case THREAD_TIMER:
- snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
- break;
- }
-
- rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,
- thread->xref->funcname, thread->xref->dest,
- thread->xref->xref.file, thread->xref->xref.line);
- return rv;
-}
-
-printfrr_ext_autoreg_p("TH", printfrr_thread);
-static ssize_t printfrr_thread(struct fbuf *buf, struct printfrr_eargs *ea,
- const void *ptr)
-{
- const struct thread *thread = ptr;
- struct timespec remain = {};
-
- if (ea->fmt[0] == 'D') {
- ea->fmt++;
- return printfrr_thread_dbg(buf, ea, thread);
- }
-
- if (!thread) {
- /* need to jump over time formatting flag characters in the
- * input format string, i.e. adjust ea->fmt!
- */
- printfrr_time(buf, ea, &remain,
- TIMEFMT_TIMER_DEADLINE | TIMEFMT_SKIP);
- return bputch(buf, '-');
- }
-
- TIMEVAL_TO_TIMESPEC(&thread->u.sands, &remain);
- return printfrr_time(buf, ea, &remain, TIMEFMT_TIMER_DEADLINE);
-}
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Thread management routine header.
- * Copyright (C) 1998 Kunihiro Ishiguro
- */
-
-#ifndef _ZEBRA_THREAD_H
-#define _ZEBRA_THREAD_H
-
-#include <zebra.h>
-#include <pthread.h>
-#include <poll.h>
-#include "monotime.h"
-#include "frratomic.h"
-#include "typesafe.h"
-#include "xref.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern bool cputime_enabled;
-extern unsigned long cputime_threshold;
-/* capturing wallclock time is always enabled since it is fast (reading
- * hardware TSC w/o syscalls)
- */
-extern unsigned long walltime_threshold;
-
-struct rusage_t {
-#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
- struct timespec cpu;
-#else
- struct rusage cpu;
-#endif
- struct timeval real;
-};
-#define RUSAGE_T struct rusage_t
-
-#define GETRUSAGE(X) thread_getrusage(X)
-
-PREDECL_LIST(thread_list);
-PREDECL_HEAP(thread_timer_list);
-
-struct fd_handler {
- /* number of pfd that fit in the allocated space of pfds. This is a
- * constant and is the same for both pfds and copy.
- */
- nfds_t pfdsize;
-
- /* file descriptors to monitor for i/o */
- struct pollfd *pfds;
- /* number of pollfds stored in pfds */
- nfds_t pfdcount;
-
- /* chunk used for temp copy of pollfds */
- struct pollfd *copy;
- /* number of pollfds stored in copy */
- nfds_t copycount;
-};
-
-struct xref_threadsched {
- struct xref xref;
-
- const char *funcname;
- const char *dest;
- uint32_t thread_type;
-};
-
-/* Master of the theads. */
-struct thread_master {
- char *name;
-
- struct thread **read;
- struct thread **write;
- struct thread_timer_list_head timer;
- struct thread_list_head event, ready, unuse;
- struct list *cancel_req;
- bool canceled;
- pthread_cond_t cancel_cond;
- struct hash *cpu_record;
- int io_pipe[2];
- int fd_limit;
- struct fd_handler handler;
- unsigned long alloc;
- long selectpoll_timeout;
- bool spin;
- bool handle_signals;
- pthread_mutex_t mtx;
- pthread_t owner;
-
- bool ready_run_loop;
- RUSAGE_T last_getrusage;
-};
-
-/* Thread itself. */
-struct thread {
- uint8_t type; /* thread type */
- uint8_t add_type; /* thread type */
- struct thread_list_item threaditem;
- struct thread_timer_list_item timeritem;
- struct thread **ref; /* external reference (if given) */
- struct thread_master *master; /* pointer to the struct thread_master */
- void (*func)(struct thread *); /* event function */
- void *arg; /* event argument */
- union {
- int val; /* second argument of the event. */
- int fd; /* file descriptor in case of r/w */
- struct timeval sands; /* rest of time sands value. */
- } u;
- struct timeval real;
- struct cpu_thread_history *hist; /* cache pointer to cpu_history */
- unsigned long yield; /* yield time in microseconds */
- const struct xref_threadsched *xref; /* origin location */
- pthread_mutex_t mtx; /* mutex for thread.c functions */
- bool ignore_timer_late;
-};
-
-#ifdef _FRR_ATTRIBUTE_PRINTFRR
-#pragma FRR printfrr_ext "%pTH" (struct thread *)
-#endif
-
-struct cpu_thread_history {
- void (*func)(struct thread *);
- atomic_size_t total_cpu_warn;
- atomic_size_t total_wall_warn;
- atomic_size_t total_starv_warn;
- atomic_size_t total_calls;
- atomic_size_t total_active;
- struct time_stats {
- atomic_size_t total, max;
- } real;
- struct time_stats cpu;
- atomic_uint_fast32_t types;
- const char *funcname;
-};
-
-/* Struct timeval's tv_usec one second value. */
-#define TIMER_SECOND_MICRO 1000000L
-
-/* Thread types. */
-#define THREAD_READ 0
-#define THREAD_WRITE 1
-#define THREAD_TIMER 2
-#define THREAD_EVENT 3
-#define THREAD_READY 4
-#define THREAD_UNUSED 5
-#define THREAD_EXECUTE 6
-
-/* Thread yield time. */
-#define THREAD_YIELD_TIME_SLOT 10 * 1000L /* 10ms */
-
-#define THREAD_TIMER_STRLEN 12
-
-/* Macros. */
-#define THREAD_ARG(X) ((X)->arg)
-#define THREAD_FD(X) ((X)->u.fd)
-#define THREAD_VAL(X) ((X)->u.val)
-
-/*
- * Please consider this macro deprecated, and do not use it in new code.
- */
-#define THREAD_OFF(thread) \
- do { \
- if ((thread)) \
- thread_cancel(&(thread)); \
- } while (0)
-
-/*
- * Macro wrappers to generate xrefs for all thread add calls. Includes
- * file/line/function info for debugging/tracing.
- */
-#include "lib/xref.h"
-
-#define _xref_t_a(addfn, type, m, f, a, v, t) \
- ({ \
- static const struct xref_threadsched _xref \
- __attribute__((used)) = { \
- .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
- .funcname = #f, \
- .dest = #t, \
- .thread_type = THREAD_ ## type, \
- }; \
- XREF_LINK(_xref.xref); \
- _thread_add_ ## addfn(&_xref, m, f, a, v, t); \
- }) \
- /* end */
-
-#define thread_add_read(m,f,a,v,t) _xref_t_a(read_write, READ, m,f,a,v,t)
-#define thread_add_write(m,f,a,v,t) _xref_t_a(read_write, WRITE, m,f,a,v,t)
-#define thread_add_timer(m,f,a,v,t) _xref_t_a(timer, TIMER, m,f,a,v,t)
-#define thread_add_timer_msec(m,f,a,v,t) _xref_t_a(timer_msec, TIMER, m,f,a,v,t)
-#define thread_add_timer_tv(m,f,a,v,t) _xref_t_a(timer_tv, TIMER, m,f,a,v,t)
-#define thread_add_event(m,f,a,v,t) _xref_t_a(event, EVENT, m,f,a,v,t)
-
-#define thread_execute(m,f,a,v) \
- ({ \
- static const struct xref_threadsched _xref \
- __attribute__((used)) = { \
- .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
- .funcname = #f, \
- .dest = NULL, \
- .thread_type = THREAD_EXECUTE, \
- }; \
- XREF_LINK(_xref.xref); \
- _thread_execute(&_xref, m, f, a, v); \
- }) /* end */
-
-/* Prototypes. */
-extern struct thread_master *thread_master_create(const char *);
-void thread_master_set_name(struct thread_master *master, const char *name);
-extern void thread_master_free(struct thread_master *);
-extern void thread_master_free_unused(struct thread_master *);
-
-extern void _thread_add_read_write(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg,
- int fd, struct thread **tref);
-
-extern void _thread_add_timer(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg, long t,
- struct thread **tref);
-
-extern void _thread_add_timer_msec(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg,
- long t, struct thread **tref);
-
-extern void _thread_add_timer_tv(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg,
- struct timeval *tv, struct thread **tref);
-
-extern void _thread_add_event(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg, int val,
- struct thread **tref);
-
-extern void _thread_execute(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct thread *), void *arg, int val);
-
-extern void thread_cancel(struct thread **event);
-extern void thread_cancel_async(struct thread_master *, struct thread **,
- void *);
-/* Cancel ready tasks with an arg matching 'arg' */
-extern void thread_cancel_event_ready(struct thread_master *m, void *arg);
-/* Cancel all tasks with an arg matching 'arg', including timers and io */
-extern void thread_cancel_event(struct thread_master *m, void *arg);
-extern struct thread *thread_fetch(struct thread_master *, struct thread *);
-extern void thread_call(struct thread *);
-extern unsigned long thread_timer_remain_second(struct thread *);
-extern struct timeval thread_timer_remain(struct thread *);
-extern unsigned long thread_timer_remain_msec(struct thread *);
-extern int thread_should_yield(struct thread *);
-/* set yield time for thread */
-extern void thread_set_yield_time(struct thread *, unsigned long);
-
-/* Internal libfrr exports */
-extern void thread_getrusage(RUSAGE_T *);
-extern void thread_cmd_init(void);
-
-/* Returns elapsed real (wall clock) time. */
-extern unsigned long thread_consumed_time(RUSAGE_T *after, RUSAGE_T *before,
- unsigned long *cpu_time_elapsed);
-
-/* only for use in logging functions! */
-extern pthread_key_t thread_current;
-extern char *thread_timer_to_hhmmss(char *buf, int buf_size,
- struct thread *t_timer);
-
-static inline bool thread_is_scheduled(struct thread *thread)
-{
- if (thread)
- return true;
-
- return false;
-}
-
-/* Debug signal mask */
-void debug_signals(const sigset_t *sigs);
-
-static inline void thread_ignore_late_timer(struct thread *thread)
-{
- thread->ignore_timer_late = true;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _ZEBRA_THREAD_H */
{
struct hash *vrf_hash = bmap;
- if (vrf_hash == NULL)
- return;
-
- hash_clean(vrf_hash, vrf_hash_bitmap_free);
- hash_free(vrf_hash);
+ hash_clean_and_free(&vrf_hash, vrf_hash_bitmap_free);
}
void vrf_bitmap_set(vrf_bitmap_t bmap, vrf_id_t vrf_id)
#include <stdio.h>
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "command.h"
#include "sockunion.h"
#endif /* VTYSH */
};
+struct nb_config *vty_mgmt_candidate_config;
+
+static uintptr_t mgmt_lib_hndl;
+static bool mgmt_fe_connected;
+static bool mgmt_candidate_ds_wr_locked;
+static uint64_t mgmt_client_id_next;
+static uint64_t mgmt_last_req_id = UINT64_MAX;
+
PREDECL_DLIST(vtyservs);
struct vty_serv {
int sock;
bool vtysh;
- struct thread *t_accept;
+ struct event *t_accept;
};
DECLARE_DLIST(vtyservs, struct vty_serv, itm);
static void vty_event_serv(enum vty_event event, struct vty_serv *);
static void vty_event(enum vty_event, struct vty *);
+static int vtysh_flush(struct vty *vty);
/* Extern host structure from command.c */
extern struct host host;
static bool do_log_commands;
static bool do_log_commands_perm;
+/*
+ * Resume a vtysh session that was paused waiting on an outstanding
+ * MGMTD frontend request: write the 4-byte vtysh result header
+ * (result code in byte 3) and re-arm the read event, or close the
+ * vty if it was already marked VTY_CLOSE.
+ */
+void vty_mgmt_resume_response(struct vty *vty, bool success)
+{
+	uint8_t header[4] = {0, 0, 0, 0};
+	int ret = CMD_SUCCESS;
+
+	if (!vty->mgmt_req_pending) {
+		zlog_err(
+			"vty response called without setting mgmt_req_pending");
+		return;
+	}
+
+	if (!success)
+		ret = CMD_WARNING_CONFIG_FAILED;
+
+	vty->mgmt_req_pending = false;
+	header[3] = ret;
+	buffer_put(vty->obuf, header, 4);
+
+	if (!vty->t_write && (vtysh_flush(vty) < 0))
+		/* Try to flush results; exit if a write
+		 * error occurs.
+		 */
+		return;
+
+	if (vty->status == VTY_CLOSE)
+		vty_close(vty);
+	else
+		vty_event(VTYSH_READ, vty);
+}
+
void vty_frame(struct vty *vty, const char *format, ...)
{
va_list args;
GETRUSAGE(&after);
- walltime = thread_consumed_time(&after, &before, &cputime);
+ walltime = event_consumed_time(&after, &before, &cputime);
if (cputime_enabled_here && cputime_enabled && cputime_threshold
&& cputime > cputime_threshold)
}
/* Read data via vty socket. */
-static void vty_read(struct thread *thread)
+static void vty_read(struct event *thread)
{
int i;
int nbytes;
unsigned char buf[VTY_READ_BUFSIZ];
- struct vty *vty = THREAD_ARG(thread);
+ struct vty *vty = EVENT_ARG(thread);
/* Read raw data from socket */
if ((nbytes = read(vty->fd, buf, VTY_READ_BUFSIZ)) <= 0) {
}
/* Flush buffer to the vty. */
-static void vty_flush(struct thread *thread)
+static void vty_flush(struct event *thread)
{
int erase;
buffer_status_t flushrc;
- struct vty *vty = THREAD_ARG(thread);
+ struct vty *vty = EVENT_ARG(thread);
/* Tempolary disable read thread. */
if (vty->lines == 0)
- THREAD_OFF(vty->t_read);
+ EVENT_OFF(vty->t_read);
/* Function execution continue. */
erase = ((vty->status == VTY_MORE || vty->status == VTY_MORELINE));
new->max = VTY_BUFSIZ;
new->pass_fd = -1;
+ if (mgmt_lib_hndl) {
+ new->mgmt_client_id = mgmt_client_id_next++;
+ if (mgmt_fe_create_client_session(
+ mgmt_lib_hndl, new->mgmt_client_id,
+ (uintptr_t) new) != MGMTD_SUCCESS)
+ zlog_err(
+ "Failed to open a MGMTD Frontend session for VTY session %p!!",
+ new);
+ }
+
return new;
}
if (!stdio_vty)
return;
- THREAD_OFF(stdio_vty->t_write);
- THREAD_OFF(stdio_vty->t_read);
- THREAD_OFF(stdio_vty->t_timeout);
+ EVENT_OFF(stdio_vty->t_write);
+ EVENT_OFF(stdio_vty->t_read);
+ EVENT_OFF(stdio_vty->t_timeout);
if (stdio_termios)
tcsetattr(0, TCSANOW, &stdio_orig_termios);
}
/* Accept connection from the network. */
-static void vty_accept(struct thread *thread)
+static void vty_accept(struct event *thread)
{
- struct vty_serv *vtyserv = THREAD_ARG(thread);
+ struct vty_serv *vtyserv = EVENT_ARG(thread);
int vty_sock;
union sockunion su;
int ret;
/* #define VTYSH_DEBUG 1 */
-static void vtysh_accept(struct thread *thread)
+static void vtysh_accept(struct event *thread)
{
- struct vty_serv *vtyserv = THREAD_ARG(thread);
+ struct vty_serv *vtyserv = EVENT_ARG(thread);
int accept_sock = vtyserv->sock;
int sock;
int client_len;
vty->pass_fd = fd;
}
-static void vtysh_read(struct thread *thread)
+static void vtysh_read(struct event *thread)
{
int ret;
int sock;
unsigned char *p;
uint8_t header[4] = {0, 0, 0, 0};
- sock = THREAD_FD(thread);
- vty = THREAD_ARG(thread);
+ sock = EVENT_FD(thread);
+ vty = EVENT_ARG(thread);
if ((nbytes = read(sock, buf, VTY_READ_BUFSIZ)) <= 0) {
if (nbytes < 0) {
if (ret == CMD_SUSPEND)
break;
+ /* with new infra we need to stop response till
+ * we get response through callback.
+ */
+ if (vty->mgmt_req_pending)
+ return;
+
/* warning: watchfrr hardcodes this result write
*/
header[3] = ret;
vty_event(VTYSH_READ, vty);
}
-static void vtysh_write(struct thread *thread)
+static void vtysh_write(struct event *thread)
{
- struct vty *vty = THREAD_ARG(thread);
+ struct vty *vty = EVENT_ARG(thread);
vtysh_flush(vty);
}
int i;
bool was_stdio = false;
+ if (mgmt_lib_hndl) {
+ mgmt_fe_destroy_client_session(mgmt_lib_hndl,
+ vty->mgmt_client_id);
+ vty->mgmt_session_id = 0;
+ }
+
/* Drop out of configure / transaction if needed. */
vty_config_exit(vty);
/* Cancel threads.*/
- THREAD_OFF(vty->t_read);
- THREAD_OFF(vty->t_write);
- THREAD_OFF(vty->t_timeout);
+ EVENT_OFF(vty->t_read);
+ EVENT_OFF(vty->t_write);
+ EVENT_OFF(vty->t_timeout);
if (vty->pass_fd != -1) {
close(vty->pass_fd);
}
/* When time out occur output message then close connection. */
-static void vty_timeout(struct thread *thread)
+static void vty_timeout(struct event *thread)
{
struct vty *vty;
- vty = THREAD_ARG(thread);
+ vty = EVENT_ARG(thread);
vty->v_timeout = 0;
/* Clear buffer*/
return CMD_WARNING;
}
+ if (vty_mgmt_fe_enabled()) {
+ if (!mgmt_candidate_ds_wr_locked) {
+ if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE,
+ true) != 0) {
+ vty_out(vty, "Not able to lock candidate DS\n");
+ return CMD_WARNING;
+ }
+ } else {
+ vty_out(vty,
+ "Candidate DS already locked by different session\n");
+ return CMD_WARNING;
+ }
+
+ vty->mgmt_locked_candidate_ds = true;
+ mgmt_candidate_ds_wr_locked = true;
+ }
+
vty->node = CONFIG_NODE;
vty->config = true;
vty->private_config = private_config;
vty_out(vty,
"Warning: uncommitted changes will be discarded on exit.\n\n");
} else {
- vty->candidate_config = vty_shared_candidate_config;
+ /*
+ * NOTE: On the MGMTD daemon we point the VTY candidate DS to
+ * the global MGMTD candidate DS. Else we point to the VTY
+ * Shared Candidate Config.
+ */
+ vty->candidate_config = vty_mgmt_candidate_config
+ ? vty_mgmt_candidate_config
+ : vty_shared_candidate_config;
if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL)
vty->candidate_config_base =
nb_config_dup(running_config);
{
vty->xpath_index = 0;
+ if (vty_mgmt_fe_enabled() && mgmt_candidate_ds_wr_locked &&
+ vty->mgmt_locked_candidate_ds) {
+ if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE, false) !=
+ 0) {
+ vty_out(vty, "Not able to unlock candidate DS\n");
+ return CMD_WARNING;
+ }
+
+ vty->mgmt_locked_candidate_ds = false;
+ mgmt_candidate_ds_wr_locked = false;
+ }
+
/* Perform any pending commits. */
(void)nb_cli_pending_commit_check(vty);
}
/* Master of the threads. */
-static struct thread_master *vty_master;
+static struct event_loop *vty_master;
static void vty_event_serv(enum vty_event event, struct vty_serv *vty_serv)
{
switch (event) {
case VTY_SERV:
- thread_add_read(vty_master, vty_accept, vty_serv,
- vty_serv->sock, &vty_serv->t_accept);
+ event_add_read(vty_master, vty_accept, vty_serv, vty_serv->sock,
+ &vty_serv->t_accept);
break;
#ifdef VTYSH
case VTYSH_SERV:
- thread_add_read(vty_master, vtysh_accept, vty_serv,
- vty_serv->sock, &vty_serv->t_accept);
+ event_add_read(vty_master, vtysh_accept, vty_serv,
+ vty_serv->sock, &vty_serv->t_accept);
break;
#endif /* VTYSH */
case VTY_READ:
switch (event) {
#ifdef VTYSH
case VTYSH_READ:
- thread_add_read(vty_master, vtysh_read, vty, vty->fd,
- &vty->t_read);
+ event_add_read(vty_master, vtysh_read, vty, vty->fd,
+ &vty->t_read);
break;
case VTYSH_WRITE:
- thread_add_write(vty_master, vtysh_write, vty, vty->wfd,
- &vty->t_write);
+ event_add_write(vty_master, vtysh_write, vty, vty->wfd,
+ &vty->t_write);
break;
#endif /* VTYSH */
case VTY_READ:
- thread_add_read(vty_master, vty_read, vty, vty->fd,
- &vty->t_read);
+ event_add_read(vty_master, vty_read, vty, vty->fd,
+ &vty->t_read);
/* Time out treatment. */
if (vty->v_timeout) {
- THREAD_OFF(vty->t_timeout);
- thread_add_timer(vty_master, vty_timeout, vty,
- vty->v_timeout, &vty->t_timeout);
+ EVENT_OFF(vty->t_timeout);
+ event_add_timer(vty_master, vty_timeout, vty,
+ vty->v_timeout, &vty->t_timeout);
}
break;
case VTY_WRITE:
- thread_add_write(vty_master, vty_flush, vty, vty->wfd,
- &vty->t_write);
+ event_add_write(vty_master, vty_flush, vty, vty->wfd,
+ &vty->t_write);
break;
case VTY_TIMEOUT_RESET:
- THREAD_OFF(vty->t_timeout);
+ EVENT_OFF(vty->t_timeout);
if (vty->v_timeout)
- thread_add_timer(vty_master, vty_timeout, vty,
- vty->v_timeout, &vty->t_timeout);
+ event_add_timer(vty_master, vty_timeout, vty,
+ vty->v_timeout, &vty->t_timeout);
break;
case VTY_SERV:
case VTYSH_SERV:
/* currently nothing to do, but likely to have future use */
}
+/*
+ * Connection-state callback from the MGMTD frontend client library.
+ * Records the connection state for vty_mgmt_fe_enabled().
+ */
+static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
+				      bool connected)
+{
+	/*
+	 * A disconnect is worth an error; a normal connect is not. The
+	 * previous combined format string rendered as the garbled
+	 * "Got dis: connected from ..." on disconnect.
+	 */
+	if (connected)
+		zlog_debug("Got connected to MGMTD Frontend Server");
+	else
+		zlog_err("ERROR: Got disconnected from MGMTD Frontend Server");
+
+	mgmt_fe_connected = connected;
+
+	/*
+	 * TODO: Setup or teardown front-end sessions for existing
+	 * VTY connections.
+	 */
+}
+
+/*
+ * Session create/destroy result callback from the MGMTD frontend
+ * client library. On successful create, remember the session-id on
+ * the vty for use in later requests.
+ */
+static void vty_mgmt_session_created(uintptr_t lib_hndl, uintptr_t usr_data,
+				     uint64_t client_id, bool create,
+				     bool success, uintptr_t session_id,
+				     uintptr_t session_ctx)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err("%s session for client %llu failed!",
+			 create ? "Creating" : "Destroying",
+			 (unsigned long long)client_id);
+		return;
+	}
+
+	/* Success is routine; do not log it at error level. */
+	zlog_debug("%s session for client %llu successfully!",
+		   create ? "Created" : "Destroyed",
+		   (unsigned long long)client_id);
+	if (create)
+		vty->mgmt_session_id = session_id;
+}
+
+/*
+ * LOCK/UNLOCK-DS result callback. Reports the outcome to the user and
+ * resumes the vty, which was paused waiting on this reply.
+ */
+static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
+				      uint64_t client_id, uintptr_t session_id,
+				      uintptr_t session_ctx, uint64_t req_id,
+				      bool lock_ds, bool success,
+				      Mgmtd__DatastoreId ds_id,
+				      char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err("%socking for DS %u failed! Err: '%s'",
+			 lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+		vty_out(vty, "ERROR: %socking for DS %u failed! Err: '%s'\n",
+			lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+	} else {
+		/* Success is routine; log at debug, not error, level. */
+		zlog_debug("%socked DS %u successfully!", lock_ds ? "L" : "Unl",
+			   ds_id);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * SET_CONFIG result callback. Reports failure to the user and resumes
+ * the paused vty either way.
+ */
+static void vty_mgmt_set_config_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"SET_CONFIG request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: SET_CONFIG request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+	} else {
+		/* Success is routine; log at debug level (typo fixed). */
+		zlog_debug(
+			"SET_CONFIG request for client 0x%llx req-id %llu was successful!",
+			(unsigned long long)client_id,
+			(unsigned long long)req_id);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * COMMIT_CONFIG result callback. On success any informational message
+ * from MGMTD is echoed to the user; the paused vty is resumed either
+ * way.
+ */
+static void vty_mgmt_commit_config_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId src_ds_id,
+	Mgmtd__DatastoreId dst_ds_id, bool validate_only, char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"COMMIT_CONFIG request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: COMMIT_CONFIG request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+	} else {
+		/* Success is routine; log at debug level (typo fixed). */
+		zlog_debug(
+			"COMMIT_CONFIG request for client 0x%llx req-id %llu was successful!",
+			(unsigned long long)client_id,
+			(unsigned long long)req_id);
+		if (errmsg_if_any)
+			vty_out(vty, "MGMTD: %s\n", errmsg_if_any);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * GET_DATA result callback. Results may arrive in multiple batches:
+ * the opening bracket is printed on the first batch of a request
+ * (req_id change) and the vty is resumed only when the final batch
+ * (next_key < 0) arrives, or immediately on error.
+ */
+static enum mgmt_result vty_mgmt_get_data_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data,
+	size_t num_data, int next_key, char *errmsg_if_any)
+{
+	struct vty *vty;
+	size_t indx;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"GET_DATA request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: GET_DATA request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_mgmt_resume_response(vty, success);
+		return MGMTD_INTERNAL_ERROR;
+	}
+
+	/* "successful" typo fixed. */
+	zlog_debug(
+		"GET_DATA request for client 0x%llx req-id %llu was successful!",
+		(unsigned long long)client_id, (unsigned long long)req_id);
+
+	if (req_id != mgmt_last_req_id) {
+		mgmt_last_req_id = req_id;
+		vty_out(vty, "[\n");
+	}
+
+	for (indx = 0; indx < num_data; indx++) {
+		vty_out(vty, "  \"%s\": \"%s\"\n", yang_data[indx]->xpath,
+			yang_data[indx]->value->encoded_str_val);
+	}
+	if (next_key < 0) {
+		vty_out(vty, "]\n");
+		vty_mgmt_resume_response(vty, success);
+	}
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Callback table registered with the MGMTD frontend client library in
+ * vty_init_mgmt_fe(); .name is filled in at registration time.
+ */
+static struct mgmt_fe_client_params client_params = {
+	.client_connect_notify = vty_mgmt_server_connected,
+	.client_session_notify = vty_mgmt_session_created,
+	.lock_ds_notify = vty_mgmt_ds_lock_notified,
+	.set_config_notify = vty_mgmt_set_config_result_notified,
+	.commit_config_notify = vty_mgmt_commit_config_result_notified,
+	.get_data_notify = vty_mgmt_get_data_result_notified,
+};
+
+/*
+ * Initialize the MGMTD frontend client for this process. Must be
+ * called after vty_init() since the client library attaches to the
+ * vty event loop (vty_master).
+ */
+void vty_init_mgmt_fe(void)
+{
+	if (!vty_master) {
+		zlog_err("Always call vty_mgmt_init_fe() after vty_init()!!");
+		return;
+	}
+
+	assert(!mgmt_lib_hndl);
+	/* Client name is "<progname>-<pid>" so each daemon instance is
+	 * distinguishable on the mgmtd side.
+	 */
+	snprintf(client_params.name, sizeof(client_params.name), "%s-%lld",
+		 frr_get_progname(), (long long)getpid());
+	mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master);
+	assert(mgmt_lib_hndl);
+}
+
+/*
+ * True when the MGMTD frontend client library is initialized AND the
+ * connection to mgmtd is currently up.
+ */
+bool vty_mgmt_fe_enabled(void)
+{
+	/* The expression is already boolean; "? true : false" was
+	 * redundant.
+	 */
+	return mgmt_lib_hndl && mgmt_fe_connected;
+}
+
+/*
+ * Send a LOCK-DS (lock=true) or UNLOCK-DS (lock=false) request for the
+ * given datastore. On success the vty is marked request-pending and is
+ * resumed later from vty_mgmt_ds_lock_notified(). Silently a no-op if
+ * there is no frontend session. Returns 0 on send-success, -1 on send
+ * failure.
+ */
+int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+			     bool lock)
+{
+	enum mgmt_result ret;
+
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		vty->mgmt_req_id++;
+		ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
+				      vty->mgmt_req_id, ds_id, lock);
+		if (ret != MGMTD_SUCCESS) {
+			zlog_err(
+				"Failed to send %sLOCK-DS-REQ to MGMTD for req-id %llu.",
+				lock ? "" : "UN",
+				(unsigned long long)vty->mgmt_req_id);
+			vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!",
+				lock ? "" : "UN");
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert the vty's queued config changes (vty->cfg_changes) into a
+ * SET_CONFIG request on the candidate datastore and send it to MGMTD,
+ * optionally with an implicit commit to the running datastore. Marks
+ * the vty request-pending on success. Returns 0 on success (or no-op
+ * when no frontend session exists), -1 on send failure.
+ */
+int vty_mgmt_send_config_data(struct vty *vty)
+{
+	Mgmtd__YangDataValue value[VTY_MAXCFGCHANGES];
+	Mgmtd__YangData cfg_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangCfgDataReq cfg_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangCfgDataReq *cfgreq[VTY_MAXCFGCHANGES] = {0};
+	size_t indx;
+	int cnt;
+	bool implicit_commit = false;
+
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		cnt = 0;
+		for (indx = 0; indx < vty->num_cfg_changes; indx++) {
+			mgmt_yang_data_init(&cfg_data[cnt]);
+
+			if (vty->cfg_changes[indx].value) {
+				mgmt_yang_data_value_init(&value[cnt]);
+				value[cnt].encoded_str_val =
+					(char *)vty->cfg_changes[indx].value;
+				value[cnt].value_case =
+					MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+				cfg_data[cnt].value = &value[cnt];
+			}
+
+			cfg_data[cnt].xpath = vty->cfg_changes[indx].xpath;
+
+			mgmt_yang_cfg_data_req_init(&cfg_req[cnt]);
+			cfg_req[cnt].data = &cfg_data[cnt];
+			switch (vty->cfg_changes[indx].operation) {
+			case NB_OP_DESTROY:
+				cfg_req[cnt].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+				break;
+
+			case NB_OP_CREATE:
+			case NB_OP_MODIFY:
+			case NB_OP_MOVE:
+			case NB_OP_PRE_VALIDATE:
+			case NB_OP_APPLY_FINISH:
+				cfg_req[cnt].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+				break;
+			case NB_OP_GET_ELEM:
+			case NB_OP_GET_NEXT:
+			case NB_OP_GET_KEYS:
+			case NB_OP_LOOKUP_ENTRY:
+			case NB_OP_RPC:
+				assert(!"Invalid type of operation");
+				break;
+			default:
+				assert(!"non-enum value, invalid");
+			}
+
+			cfgreq[cnt] = &cfg_req[cnt];
+			cnt++;
+		}
+
+		vty->mgmt_req_id++;
+		implicit_commit = vty_needs_implicit_commit(vty);
+		if (cnt && mgmt_fe_set_config_data(
+				   mgmt_lib_hndl, vty->mgmt_session_id,
+				   vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
+				   cnt, implicit_commit,
+				   MGMTD_DS_RUNNING) != MGMTD_SUCCESS) {
+			/* Report the count actually sent (cnt), not the
+			 * loop index.
+			 */
+			zlog_err("Failed to send %d Config Xpaths to MGMTD!!",
+				 cnt);
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Send a COMMIT_CONFIG request (candidate -> running) to MGMTD.
+ * validate_only requests validation without applying; abort discards
+ * the uncommitted candidate changes instead. Marks the vty
+ * request-pending on success. Returns 0 on success (or no-op when no
+ * frontend session exists), -1 on send failure.
+ */
+int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
+{
+	enum mgmt_result ret;
+
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		vty->mgmt_req_id++;
+		ret = mgmt_fe_commit_config_data(
+			mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id,
+			MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
+			abort);
+		if (ret != MGMTD_SUCCESS) {
+			zlog_err(
+				"Failed to send COMMIT-REQ to MGMTD for req-id %llu.",
+				(unsigned long long)vty->mgmt_req_id);
+			vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!");
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+		vty->mgmt_num_pending_setcfg = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Send a GET-CONFIG request for the given xpaths against the given
+ * datastore. Marks the vty request-pending on success; results arrive
+ * via vty_mgmt_get_data_result_notified(). Returns 0 on success, -1 on
+ * send failure.
+ */
+int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
+			     const char **xpath_list, int num_req)
+{
+	enum mgmt_result ret;
+	Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+	int i;
+
+	vty->mgmt_req_id++;
+
+	for (i = 0; i < num_req; i++) {
+		mgmt_yang_get_data_req_init(&get_req[i]);
+		mgmt_yang_data_init(&yang_data[i]);
+
+		/*
+		 * BUGFIX: was "yang_data->xpath = ..." which overwrote
+		 * element 0 on every iteration, leaving the other
+		 * requests with uninitialized xpaths.
+		 */
+		yang_data[i].xpath = (char *)xpath_list[i];
+
+		get_req[i].data = &yang_data[i];
+		getreq[i] = &get_req[i];
+	}
+	ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id,
+				      vty->mgmt_req_id, datastore, getreq,
+				      num_req);
+
+	if (ret != MGMTD_SUCCESS) {
+		zlog_err("Failed to send GET-CONFIG to MGMTD for req-id %llu.",
+			 (unsigned long long)vty->mgmt_req_id);
+		vty_out(vty, "Failed to send GET-CONFIG to MGMTD!");
+		return -1;
+	}
+
+	vty->mgmt_req_pending = true;
+
+	return 0;
+}
+
+/*
+ * Send a GET-DATA (operational data) request for the given xpaths
+ * against the given datastore. Marks the vty request-pending on
+ * success; results arrive via vty_mgmt_get_data_result_notified().
+ * Returns 0 on success, -1 on send failure.
+ */
+int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+			   const char **xpath_list, int num_req)
+{
+	enum mgmt_result ret;
+	Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+	int i;
+
+	vty->mgmt_req_id++;
+
+	for (i = 0; i < num_req; i++) {
+		mgmt_yang_get_data_req_init(&get_req[i]);
+		mgmt_yang_data_init(&yang_data[i]);
+
+		/*
+		 * BUGFIX: was "yang_data->xpath = ..." which overwrote
+		 * element 0 on every iteration, leaving the other
+		 * requests with uninitialized xpaths.
+		 */
+		yang_data[i].xpath = (char *)xpath_list[i];
+
+		get_req[i].data = &yang_data[i];
+		getreq[i] = &get_req[i];
+	}
+	ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id,
+			       vty->mgmt_req_id, datastore, getreq, num_req);
+
+	if (ret != MGMTD_SUCCESS) {
+		zlog_err("Failed to send GET-DATA to MGMTD for req-id %llu.",
+			 (unsigned long long)vty->mgmt_req_id);
+		vty_out(vty, "Failed to send GET-DATA to MGMTD!");
+		return -1;
+	}
+
+	vty->mgmt_req_pending = true;
+
+	return 0;
+}
+
/* Install vty's own commands like `who' command. */
-void vty_init(struct thread_master *master_thread, bool do_command_logging)
+void vty_init(struct event_loop *master_thread, bool do_command_logging)
{
/* For further configuration read, preserve current directory. */
vty_save_cwd();
struct vty *vty;
struct vty_serv *vtyserv;
+ if (mgmt_lib_hndl) {
+ mgmt_fe_client_lib_destroy(mgmt_lib_hndl);
+ mgmt_lib_hndl = 0;
+ }
+
memset(vty_cwd, 0x00, sizeof(vty_cwd));
vty_reset();
vtys_init(vtysh_sessions);
while ((vtyserv = vtyservs_pop(vty_servs))) {
- THREAD_OFF(vtyserv->t_accept);
+ EVENT_OFF(vtyserv->t_accept);
close(vtyserv->sock);
XFREE(MTYPE_VTY_SERV, vtyserv);
}
#include <regex.h>
#endif /* HAVE_LIBPCRE2_POSIX */
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "sockunion.h"
#include "qobj.h"
#include "compiler.h"
#include "northbound.h"
#include "zlog_live.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
#ifdef __cplusplus
extern "C" {
/* Changes enqueued to be applied in the candidate configuration. */
size_t num_cfg_changes;
- struct vty_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
+ struct nb_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
/* XPath of the current node */
int xpath_index;
char xpath[VTY_MAXDEPTH][XPATH_MAXLEN];
+ /*
+ * Keep track of how many SET_CFG requests has been sent so far that
+ * has not been committed yet.
+ */
+ size_t mgmt_num_pending_setcfg;
+
/* In configure mode. */
bool config;
/* Dynamic transaction information. */
bool pending_allowed;
bool pending_commit;
+ bool no_implicit_commit;
char *pending_cmds_buf;
size_t pending_cmds_buflen;
size_t pending_cmds_bufpos;
/* Confirmed-commit timeout and rollback configuration. */
- struct thread *t_confirmed_commit_timeout;
+ struct event *t_confirmed_commit_timeout;
struct nb_config *confirmed_commit_rollback;
/* qobj object ID (replacement for "index") */
int lines;
/* Read and write thread. */
- struct thread *t_read;
- struct thread *t_write;
+ struct event *t_read;
+ struct event *t_write;
/* Timeout seconds and thread. */
unsigned long v_timeout;
- struct thread *t_timeout;
+ struct event *t_timeout;
/* What address is this vty comming from. */
char address[SU_ADDRSTRLEN];
* without any output. */
size_t frame_pos;
char frame[1024];
+
+ uintptr_t mgmt_session_id;
+ uint64_t mgmt_client_id;
+ uint64_t mgmt_req_id;
+ bool mgmt_req_pending;
+ bool mgmt_locked_candidate_ds;
};
static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
#define IS_DIRECTORY_SEP(c) ((c) == DIRECTORY_SEP)
#endif
+extern struct nb_config *vty_mgmt_candidate_config;
+
/* Prototypes. */
-extern void vty_init(struct thread_master *, bool do_command_logging);
+extern void vty_init(struct event_loop *m, bool do_command_logging);
extern void vty_init_vtysh(void);
extern void vty_terminate(void);
extern void vty_reset(void);
extern void vty_stdio_resume(void);
extern void vty_stdio_close(void);
+extern void vty_init_mgmt_fe(void);
+extern bool vty_mgmt_fe_enabled(void);
+extern int vty_mgmt_send_config_data(struct vty *vty);
+extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
+ bool abort);
+extern int vty_mgmt_send_get_config(struct vty *vty,
+ Mgmtd__DatastoreId datastore,
+ const char **xpath_list, int num_req);
+extern int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+ const char **xpath_list, int num_req);
+extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+ bool lock);
+extern void vty_mgmt_resume_response(struct vty *vty, bool success);
+
+static inline bool vty_needs_implicit_commit(struct vty *vty)
+{
+ return (frr_get_cli_mode() == FRR_CLI_CLASSIC
+ ? ((vty->pending_allowed || vty->no_implicit_commit)
+ ? false
+ : true)
+ : false);
+}
+
#ifdef __cplusplus
}
#endif
*/
#include "zebra.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "wheel.h"
#include "log.h"
static int debug_timer_wheel = 0;
-static void wheel_timer_thread(struct thread *t);
+static void wheel_timer_thread(struct event *t);
-static void wheel_timer_thread_helper(struct thread *t)
+static void wheel_timer_thread_helper(struct event *t)
{
struct listnode *node, *nextnode;
unsigned long long curr_slot;
struct timer_wheel *wheel;
void *data;
- wheel = THREAD_ARG(t);
+ wheel = EVENT_ARG(t);
wheel->curr_slot += wheel->slots_to_skip;
slots_to_skip++;
wheel->slots_to_skip = slots_to_skip;
- thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
- wheel->nexttime * slots_to_skip, &wheel->timer);
+ event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
+ wheel->nexttime * slots_to_skip, &wheel->timer);
}
-static void wheel_timer_thread(struct thread *t)
+static void wheel_timer_thread(struct event *t)
{
struct timer_wheel *wheel;
- wheel = THREAD_ARG(t);
+ wheel = EVENT_ARG(t);
- thread_execute(wheel->master, wheel_timer_thread_helper, wheel, 0);
+ event_execute(wheel->master, wheel_timer_thread_helper, wheel, 0);
}
-struct timer_wheel *wheel_init(struct thread_master *master, int period,
- size_t slots, unsigned int (*slot_key)(const void *),
- void (*slot_run)(void *),
- const char *run_name)
+struct timer_wheel *wheel_init(struct event_loop *master, int period,
+ size_t slots,
+ unsigned int (*slot_key)(const void *),
+ void (*slot_run)(void *), const char *run_name)
{
struct timer_wheel *wheel;
size_t i;
for (i = 0; i < slots; i++)
wheel->wheel_slot_lists[i] = list_new();
- thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
- wheel->nexttime, &wheel->timer);
+ event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
+ wheel->nexttime, &wheel->timer);
return wheel;
}
list_delete(&wheel->wheel_slot_lists[i]);
}
- THREAD_OFF(wheel->timer);
+ EVENT_OFF(wheel->timer);
XFREE(MTYPE_TIMER_WHEEL_LIST, wheel->wheel_slot_lists);
XFREE(MTYPE_TIMER_WHEEL, wheel->name);
XFREE(MTYPE_TIMER_WHEEL, wheel);
struct timer_wheel {
char *name;
- struct thread_master *master;
+ struct event_loop *master;
int slots;
long long curr_slot;
unsigned int period;
unsigned int slots_to_skip;
struct list **wheel_slot_lists;
- struct thread *timer;
+ struct event *timer;
/*
* Key to determine what slot the item belongs in
*/
* and cause significant amount of time handling thread events instead
* of running your code.
*/
-struct timer_wheel *wheel_init(struct thread_master *master, int period,
+struct timer_wheel *wheel_init(struct event_loop *master, int period,
size_t slots,
unsigned int (*slot_key)(const void *),
void (*slot_run)(void *), const char *run_name);
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
}
/* create new work queue */
-struct work_queue *work_queue_new(struct thread_master *m,
- const char *queue_name)
+struct work_queue *work_queue_new(struct event_loop *m, const char *queue_name)
{
struct work_queue *new;
/* Default values, can be overridden by caller */
new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
- new->spec.yield = THREAD_YIELD_TIME_SLOT;
+ new->spec.yield = EVENT_YIELD_TIME_SLOT;
new->spec.retry = WORK_QUEUE_DEFAULT_RETRY;
return new;
{
struct work_queue *wq = *wqp;
- THREAD_OFF(wq->thread);
+ EVENT_OFF(wq->thread);
while (!work_queue_empty(wq)) {
struct work_queue_item *item = work_queue_last_item(wq);
bool work_queue_is_scheduled(struct work_queue *wq)
{
- return thread_is_scheduled(wq->thread);
+ return event_is_scheduled(wq->thread);
}
static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
/* if appropriate, schedule work queue thread */
if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) &&
- !thread_is_scheduled(wq->thread) && !work_queue_empty(wq)) {
+ !event_is_scheduled(wq->thread) && !work_queue_empty(wq)) {
/* Schedule timer if there's a delay, otherwise just schedule
* as an 'event'
*/
if (delay > 0) {
- thread_add_timer_msec(wq->master, work_queue_run, wq,
- delay, &wq->thread);
- thread_ignore_late_timer(wq->thread);
+ event_add_timer_msec(wq->master, work_queue_run, wq,
+ delay, &wq->thread);
+ event_ignore_late_timer(wq->thread);
} else
- thread_add_event(wq->master, work_queue_run, wq, 0,
- &wq->thread);
+ event_add_event(wq->master, work_queue_run, wq, 0,
+ &wq->thread);
/* set thread yield time, if needed */
- if (thread_is_scheduled(wq->thread) &&
- wq->spec.yield != THREAD_YIELD_TIME_SLOT)
- thread_set_yield_time(wq->thread, wq->spec.yield);
+ if (event_is_scheduled(wq->thread) &&
+ wq->spec.yield != EVENT_YIELD_TIME_SLOT)
+ event_set_yield_time(wq->thread, wq->spec.yield);
return 1;
} else
return 0;
*/
void work_queue_plug(struct work_queue *wq)
{
- THREAD_OFF(wq->thread);
+ EVENT_OFF(wq->thread);
UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}
* will reschedule itself if required,
* otherwise work_queue_item_add
*/
-void work_queue_run(struct thread *thread)
+void work_queue_run(struct event *thread)
{
struct work_queue *wq;
struct work_queue_item *item, *titem;
unsigned int cycles = 0;
char yielded = 0;
- wq = THREAD_ARG(thread);
+ wq = EVENT_ARG(thread);
assert(wq);
cycles++;
/* test if we should yield */
- if (!(cycles % wq->cycles.granularity)
- && thread_should_yield(thread)) {
+ if (!(cycles % wq->cycles.granularity) &&
+ event_should_yield(thread)) {
yielded = 1;
goto stats;
}
/* Everything but the specification struct is private
* the following may be read
*/
- struct thread_master *master; /* thread master */
- struct thread *thread; /* thread, if one is active */
+ struct event_loop *master; /* thread master */
+ struct event *thread; /* thread, if one is active */
char *name; /* work queue name */
/* Specification for this work queue.
* user must fill in the spec of the returned work queue before adding
* anything to it
*/
-extern struct work_queue *work_queue_new(struct thread_master *m,
+extern struct work_queue *work_queue_new(struct event_loop *m,
const char *queue_name);
/* destroy work queue */
bool work_queue_is_scheduled(struct work_queue *wq);
/* Helpers, exported for thread.c and command.c */
-extern void work_queue_run(struct thread *thread);
+extern void work_queue_run(struct event *thread);
extern void workqueue_cmd_init(void);
enum xref_type {
XREFT_NONE = 0,
- XREFT_THREADSCHED = 0x100,
+ XREFT_EVENTSCHED = 0x100,
XREFT_LOGMSG = 0x200,
XREFT_ASSERT = 0x280,
xpath += 2;
if (lyd_find_xpath(dnode, xpath, &set)) {
- assert(0); /* XXX replicates old libyang1 base code */
+ /*
+ * Commenting out the below assert failure as it crashes mgmtd
+ * when bad xpath is passed.
+ *
+ * assert(0); XXX replicates old libyang1 base code
+ */
goto exit;
}
if (set->count == 0)
#endif
#ifdef HAVE_SYSREPO
sr_subscription_ctx_t *sr_subscription;
- struct thread *sr_thread;
+ struct event *sr_thread;
#endif
};
RB_HEAD(yang_modules, yang_module);
#include "vrf_int.h"
#include "if.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "zclient.h"
#include "memory.h"
#include "table.h"
static int zclient_debug;
/* Allocate zclient structure. */
-struct zclient *zclient_new(struct thread_master *master,
+struct zclient *zclient_new(struct event_loop *master,
struct zclient_options *opt,
zclient_handler *const *handlers, size_t n_handlers)
{
zlog_debug("zclient %p stopped", zclient);
/* Stop threads. */
- THREAD_OFF(zclient->t_read);
- THREAD_OFF(zclient->t_connect);
- THREAD_OFF(zclient->t_write);
+ EVENT_OFF(zclient->t_read);
+ EVENT_OFF(zclient->t_connect);
+ EVENT_OFF(zclient->t_write);
/* Reset streams. */
stream_reset(zclient->ibuf);
return ZCLIENT_SEND_FAILURE;
}
-static void zclient_flush_data(struct thread *thread)
+static void zclient_flush_data(struct event *thread)
{
- struct zclient *zclient = THREAD_ARG(thread);
+ struct zclient *zclient = EVENT_ARG(thread);
zclient->t_write = NULL;
if (zclient->sock < 0)
return;
case BUFFER_PENDING:
zclient->t_write = NULL;
- thread_add_write(zclient->master, zclient_flush_data, zclient,
- zclient->sock, &zclient->t_write);
+ event_add_write(zclient->master, zclient_flush_data, zclient,
+ zclient->sock, &zclient->t_write);
break;
case BUFFER_EMPTY:
if (zclient->zebra_buffer_write_ready)
__func__, zclient->sock);
return zclient_failed(zclient);
case BUFFER_EMPTY:
- THREAD_OFF(zclient->t_write);
+ EVENT_OFF(zclient->t_write);
return ZCLIENT_SEND_SUCCESS;
case BUFFER_PENDING:
- thread_add_write(zclient->master, zclient_flush_data, zclient,
- zclient->sock, &zclient->t_write);
+ event_add_write(zclient->master, zclient_flush_data, zclient,
+ zclient->sock, &zclient->t_write);
return ZCLIENT_SEND_BUFFERED;
}
/* This function is a wrapper function for calling zclient_start from
timer or event thread. */
-static void zclient_connect(struct thread *t)
+static void zclient_connect(struct event *t)
{
struct zclient *zclient;
- zclient = THREAD_ARG(t);
+ zclient = EVENT_ARG(t);
zclient->t_connect = NULL;
if (zclient_debug)
};
/* Zebra client message read function. */
-static void zclient_read(struct thread *thread)
+static void zclient_read(struct event *thread)
{
size_t already;
uint16_t length, command;
struct zclient *zclient;
/* Get socket to zebra. */
- zclient = THREAD_ARG(thread);
+ zclient = EVENT_ARG(thread);
zclient->t_read = NULL;
/* Read zebra header (if we don't have it already). */
{
switch (event) {
case ZCLIENT_SCHEDULE:
- thread_add_event(zclient->master, zclient_connect, zclient, 0,
- &zclient->t_connect);
+ event_add_event(zclient->master, zclient_connect, zclient, 0,
+ &zclient->t_connect);
break;
case ZCLIENT_CONNECT:
if (zclient_debug)
zlog_debug(
"zclient connect failures: %d schedule interval is now %d",
zclient->fail, zclient->fail < 3 ? 10 : 60);
- thread_add_timer(zclient->master, zclient_connect, zclient,
- zclient->fail < 3 ? 10 : 60,
- &zclient->t_connect);
+ event_add_timer(zclient->master, zclient_connect, zclient,
+ zclient->fail < 3 ? 10 : 60,
+ &zclient->t_connect);
break;
case ZCLIENT_READ:
zclient->t_read = NULL;
- thread_add_read(zclient->master, zclient_read, zclient,
- zclient->sock, &zclient->t_read);
+ event_add_read(zclient->master, zclient_read, zclient,
+ zclient->sock, &zclient->t_read);
break;
}
}
/* Structure for the zebra client. */
struct zclient {
/* The thread master we schedule ourselves on */
- struct thread_master *master;
+ struct event_loop *master;
/* Privileges to change socket values */
struct zebra_privs_t *privs;
struct buffer *wb;
/* Read and connect thread. */
- struct thread *t_read;
- struct thread *t_connect;
+ struct event *t_read;
+ struct event *t_connect;
/* Thread to write buffered data to zebra. */
- struct thread *t_write;
+ struct event *t_write;
/* Redistribute information. */
uint8_t redist_default; /* clients protocol */
extern uint32_t zclient_get_nhg_start(uint32_t proto);
-extern struct zclient *zclient_new(struct thread_master *m,
+extern struct zclient *zclient_new(struct event_loop *m,
struct zclient_options *opt,
zclient_handler *const *handlers,
size_t n_handlers);
#include "frrcu.h"
#include "zlog.h"
#include "libfrr_trace.h"
-#include "thread.h"
+#include "frrevent.h"
DEFINE_MTYPE_STATIC(LIB, LOG_MESSAGE, "log message");
DEFINE_MTYPE_STATIC(LIB, LOG_TLSBUF, "log thread-local buffer");
static void zlog_backtrace_msg(const struct xref_logmsg *xref, int prio)
{
- struct thread *tc = pthread_getspecific(thread_current);
+ struct event *tc = pthread_getspecific(thread_current);
const char *uid = xref->xref.xrefdata->uid;
bool found_thread = false;
#include "frr_pthread.h"
#include "command.h"
#include "monotime.h"
-#include "thread.h"
+#include "frrevent.h"
#include "lib/version.h"
#include "lib/lib_errors.h"
rcu_free(MTYPE_LOG_5424, oldt, zt.rcu_head);
}
-static void zlog_5424_reconnect(struct thread *t)
+static void zlog_5424_reconnect(struct event *t)
{
- struct zlog_cfg_5424 *zcf = THREAD_ARG(t);
- int fd = THREAD_FD(t);
+ struct zlog_cfg_5424 *zcf = EVENT_ARG(t);
+ int fd = EVENT_FD(t);
char dummy[256];
ssize_t ret;
ret = read(fd, dummy, sizeof(dummy));
if (ret > 0) {
/* logger is sending us something?!?! */
- thread_add_read(t->master, zlog_5424_reconnect, zcf, fd,
- &zcf->t_reconnect);
+ event_add_read(t->master, zlog_5424_reconnect, zcf, fd,
+ &zcf->t_reconnect);
return;
}
assert(zcf->master);
if (fd != -1) {
- thread_add_read(zcf->master, zlog_5424_reconnect, zcf,
- fd, &zcf->t_reconnect);
+ event_add_read(zcf->master, zlog_5424_reconnect, zcf,
+ fd, &zcf->t_reconnect);
zcf->reconn_backoff_cur = zcf->reconn_backoff;
} else {
- thread_add_timer_msec(zcf->master, zlog_5424_reconnect,
- zcf, zcf->reconn_backoff_cur,
- &zcf->t_reconnect);
+ event_add_timer_msec(zcf->master, zlog_5424_reconnect,
+ zcf, zcf->reconn_backoff_cur,
+ &zcf->t_reconnect);
zcf->reconn_backoff_cur += zcf->reconn_backoff_cur / 2;
if (zcf->reconn_backoff_cur > zcf->reconn_backoff_max)
{
int fd = -1;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
if (zcf->prio_min != ZLOG_DISABLED)
fd = zlog_5424_open(zcf, -1);
if (!zcf->active)
return true;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
/* need to retain the socket type because it also influences
* other fields (packets) and we can't atomically swap these
#include "zlog_targets.h"
#include "qobj.h"
-struct thread;
-struct thread_master;
+struct event;
+struct event_loop;
enum zlog_5424_dst {
/* can be used to disable a target temporarily */
*/
/* sockets only - read handler to reconnect on errors */
- struct thread_master *master;
- struct thread *t_reconnect;
+ struct event_loop *master;
+ struct event *t_reconnect;
unsigned int reconn_backoff, reconn_backoff_cur, reconn_backoff_max;
int sock_type;
struct sockaddr_storage sa;
DEFINE_QOBJ_TYPE(zlog_cfg_5424_user);
static struct targets_head targets = INIT_RBTREE_UNIQ(targets);
-static struct thread_master *log_5424_master;
+static struct event_loop *log_5424_master;
static void clear_dst(struct zlog_cfg_5424_user *cfg);
/* hooks */
-static int log_5424_early_init(struct thread_master *master);
+static int log_5424_early_init(struct event_loop *master);
static int log_5424_rotate(void);
static int log_5424_fini(void);
hook_register(frr_fini, log_5424_fini);
}
-static int log_5424_early_init(struct thread_master *master)
+static int log_5424_early_init(struct event_loop *master)
{
log_5424_master = master;
--- /dev/null
+all: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/mgmtd
+%: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/$@
+
+Makefile:
+ #nothing
+ALWAYS:
+.PHONY: ALWAYS makefiles
+.SUFFIXES:
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRR Management Daemon (MGMTD) program
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_memory.h"
+
+bool mgmt_debug_be;
+bool mgmt_debug_fe;
+bool mgmt_debug_ds;
+bool mgmt_debug_txn;
+
+/* MGMTD process wide configuration. */
+static struct mgmt_master mgmt_master;
+
+/* MGMTD process wide configuration pointer to export. */
+struct mgmt_master *mm;
+
+void mgmt_master_init(struct event_loop *master, const int buffer_size)
+{
+ memset(&mgmt_master, 0, sizeof(struct mgmt_master));
+
+ mm = &mgmt_master;
+ mm->master = master;
+ mm->terminating = false;
+ mm->socket_buffer = buffer_size;
+ mm->perf_stats_en = true;
+}
+
+void mgmt_init(void)
+{
+
+ /*
+ * Allocates some vital data structures used by peer commands in
+ * vty_init
+ */
+ vty_init_mgmt_fe();
+
+ /* Initialize datastores */
+ mgmt_ds_init(mm);
+
+ /* Initialize history */
+ mgmt_history_init();
+
+ /* Initialize MGMTD Transaction module */
+ mgmt_txn_init(mm, mm->master);
+
+ /* Initialize the MGMTD Backend Adapter Module */
+ mgmt_be_adapter_init(mm->master);
+
+ /* Initialize the MGMTD Frontend Adapter Module */
+ mgmt_fe_adapter_init(mm->master, mm);
+
+ /* Start the MGMTD Backend Server for clients to connect */
+ mgmt_be_server_init(mm->master);
+
+ /* Start the MGMTD Frontend Server for clients to connect */
+ mgmt_fe_server_init(mm->master);
+
+ /* MGMTD VTY commands installation. */
+ mgmt_vty_init();
+}
+
+void mgmt_terminate(void)
+{
+ mgmt_fe_server_destroy();
+ mgmt_fe_adapter_destroy();
+ mgmt_be_server_destroy();
+ mgmt_be_adapter_destroy();
+ mgmt_txn_destroy();
+ mgmt_history_destroy();
+ mgmt_ds_destroy();
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD message definition header.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_H
+#define _FRR_MGMTD_H
+
+#include "vrf.h"
+#include "defaults.h"
+#include "stream.h"
+
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_VTY_PORT 2622
+#define MGMTD_SOCKET_BUF_SIZE 65535
+#define MGMTD_MAX_COMMIT_LIST 10
+
+extern bool mgmt_debug_be;
+extern bool mgmt_debug_fe;
+extern bool mgmt_debug_ds;
+extern bool mgmt_debug_txn;
+
+struct mgmt_txn_ctx;
+
+/*
+ * MGMTD master for system wide configurations and variables.
+ */
+struct mgmt_master {
+ struct event_loop *master;
+
+ /* How big should we set the socket buffer size */
+ uint32_t socket_buffer;
+
+ /* The single instance of config transaction allowed at any time */
+ struct mgmt_txns_head txn_list;
+
+ /* Map of Transactions and its ID */
+ struct hash *txn_hash;
+ uint64_t next_txn_id;
+
+ /* The single instance of config transaction allowed at any time */
+ struct mgmt_txn_ctx *cfg_txn;
+
+ /* Datastores */
+ struct mgmt_ds_ctx *running_ds;
+ struct mgmt_ds_ctx *candidate_ds;
+ struct mgmt_ds_ctx *oper_ds;
+
+ bool terminating; /* global flag that sigint terminate seen */
+ bool perf_stats_en; /* to enable performance stats measurement */
+
+ /* List of commit infos */
+ struct mgmt_cmt_infos_head cmts; /* List of last 10 commits executed. */
+};
+
+extern struct mgmt_master *mm;
+
+/* Inline functions */
+static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
+{
+ return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
+ + (a.tv_usec - b.tv_usec));
+}
+
+/*
+ * Remove trailing separator from a string.
+ *
+ * str
+ * A null terminated string.
+ *
+ * sep
+ * Trailing character that needs to be removed.
+ */
+static inline void mgmt_remove_trailing_separator(char *str, char sep)
+{
+ size_t len;
+
+ len = strlen(str);
+ if (len && str[len - 1] == sep)
+ str[len - 1] = '\0';
+}
+
+/* Prototypes. */
+extern void mgmt_terminate(void);
+extern void mgmt_reset(void);
+extern time_t mgmt_clock(void);
+
+extern int mgmt_config_write(struct vty *vty);
+
+extern void mgmt_master_init(struct event_loop *master, const int buffer_size);
+
+extern void mgmt_init(void);
+extern void mgmt_vty_init(void);
+
+static inline char *mgmt_realtime_to_string(struct timeval *tv, char *buf,
+ size_t sz)
+{
+ struct tm tm;
+ size_t n;
+
+ localtime_r((const time_t *)&tv->tv_sec, &tm);
+ n = strftime(buf, sz, "%Y-%m-%dT%H:%M:%S", &tm);
+ snprintf(&buf[n], sz - n, ",%06u000", (unsigned int)tv->tv_usec);
+ return buf;
+}
+
+#endif /* _FRR_MGMTD_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "frrevent.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmt_be_client.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_ADAPTER_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_ADAPTER_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_be) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: This is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ *
+ * Please see xpath_map_reg[] in lib/mgmt_be_client.c
+ * for the actual map
+ */
+struct mgmt_be_xpath_map_reg {
+ const char *xpath_regexp; /* Longest matching regular expression */
+ enum mgmt_be_client_id *be_clients; /* clients to notify */
+};
+
+struct mgmt_be_xpath_regexp_map {
+ const char *xpath_regexp;
+ struct mgmt_be_client_subscr_info be_subscrs;
+};
+
+struct mgmt_be_get_adapter_config_params {
+ struct mgmt_be_client_adapter *adapter;
+ struct nb_config_cbs *cfg_chgs;
+ uint32_t seq;
+};
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: This is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ */
+static const struct mgmt_be_xpath_map_reg xpath_static_map_reg[] = {
+ {.xpath_regexp = "/frr-vrf:lib/*",
+ .be_clients =
+ (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+ MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+ MGMTD_BE_CLIENT_ID_MAX}},
+ {.xpath_regexp = "/frr-interface:lib/*",
+ .be_clients =
+ (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+ MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+ MGMTD_BE_CLIENT_ID_MAX}},
+ {.xpath_regexp =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/*",
+
+ .be_clients =
+ (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+ MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+ MGMTD_BE_CLIENT_ID_MAX}},
+};
+
+#define MGMTD_BE_MAX_NUM_XPATH_MAP 256
+static struct mgmt_be_xpath_regexp_map
+ mgmt_xpath_map[MGMTD_BE_MAX_NUM_XPATH_MAP];
+static int mgmt_num_xpath_maps;
+
+static struct event_loop *mgmt_be_adapter_tm;
+
+static struct mgmt_be_adapters_head mgmt_be_adapters;
+
+static struct mgmt_be_client_adapter
+ *mgmt_be_adapters_by_id[MGMTD_BE_CLIENT_ID_MAX];
+
+/* Forward declarations */
+static void
+mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
+ enum mgmt_be_event event);
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_fd(int conn_fd)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (adapter->conn_fd == conn_fd)
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_name(const char *name)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static void
+mgmt_be_cleanup_adapters(void)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter)
+ mgmt_be_adapter_unlock(&adapter);
+}
+
+static void mgmt_be_xpath_map_init(void)
+{
+ int indx, num_xpath_maps;
+ uint16_t indx1;
+ enum mgmt_be_client_id id;
+
+ MGMTD_BE_ADAPTER_DBG("Init XPath Maps");
+
+ num_xpath_maps = (int)array_size(xpath_static_map_reg);
+ for (indx = 0; indx < num_xpath_maps; indx++) {
+ MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'",
+ xpath_static_map_reg[indx].xpath_regexp);
+ mgmt_xpath_map[indx].xpath_regexp =
+ xpath_static_map_reg[indx].xpath_regexp;
+ for (indx1 = 0;; indx1++) {
+ id = xpath_static_map_reg[indx].be_clients[indx1];
+ if (id == MGMTD_BE_CLIENT_ID_MAX)
+ break;
+ MGMTD_BE_ADAPTER_DBG(" -- Client: %s Id: %u",
+ mgmt_be_client_id2name(id),
+ id);
+ if (id < MGMTD_BE_CLIENT_ID_MAX) {
+ mgmt_xpath_map[indx]
+ .be_subscrs.xpath_subscr[id]
+ .validate_config = 1;
+ mgmt_xpath_map[indx]
+ .be_subscrs.xpath_subscr[id]
+ .notify_config = 1;
+ mgmt_xpath_map[indx]
+ .be_subscrs.xpath_subscr[id]
+ .own_oper_data = 1;
+ }
+ }
+ }
+
+ mgmt_num_xpath_maps = indx;
+ MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", mgmt_num_xpath_maps);
+}
+
+static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
+ const char *xpath)
+{
+ int match_len = 0, re_indx = 0, xp_indx = 0;
+ int rexp_len, xpath_len;
+ bool match = true, re_wild = false, xp_wild = false;
+ bool delim = false, enter_wild_match = false;
+ char wild_delim = 0;
+
+ rexp_len = strlen(xpath_regexp);
+ xpath_len = strlen(xpath);
+
+ /*
+ * Remove the trailing wildcard from the regexp and Xpath.
+ */
+ if (rexp_len && xpath_regexp[rexp_len-1] == '*')
+ rexp_len--;
+ if (xpath_len && xpath[xpath_len-1] == '*')
+ xpath_len--;
+
+ if (!rexp_len || !xpath_len)
+ return 0;
+
+ for (re_indx = 0, xp_indx = 0;
+ match && re_indx < rexp_len && xp_indx < xpath_len;) {
+ match = (xpath_regexp[re_indx] == xpath[xp_indx]);
+
+ /*
+ * Check if we need to enter wildcard matching.
+ */
+ if (!enter_wild_match && !match &&
+ (xpath_regexp[re_indx] == '*'
+ || xpath[xp_indx] == '*')) {
+ /*
+ * Found wildcard
+ */
+ enter_wild_match =
+ (xpath_regexp[re_indx-1] == '/'
+ || xpath_regexp[re_indx-1] == '\''
+ || xpath[xp_indx-1] == '/'
+ || xpath[xp_indx-1] == '\'');
+ if (enter_wild_match) {
+ if (xpath_regexp[re_indx] == '*') {
+ /*
+ * Begin RE wildcard match.
+ */
+ re_wild = true;
+ wild_delim = xpath_regexp[re_indx-1];
+ } else if (xpath[xp_indx] == '*') {
+ /*
+ * Begin XP wildcard match.
+ */
+ xp_wild = true;
+ wild_delim = xpath[xp_indx-1];
+ }
+ }
+ }
+
+ /*
+ * Check if we need to exit wildcard matching.
+ */
+ if (enter_wild_match) {
+ if (re_wild && xpath[xp_indx] == wild_delim) {
+ /*
+ * End RE wildcard matching.
+ */
+ re_wild = false;
+ if (re_indx < rexp_len-1)
+ re_indx++;
+ enter_wild_match = false;
+ } else if (xp_wild
+ && xpath_regexp[re_indx] == wild_delim) {
+ /*
+ * End XP wildcard matching.
+ */
+ xp_wild = false;
+ if (xp_indx < xpath_len-1)
+ xp_indx++;
+ enter_wild_match = false;
+ }
+ }
+
+ match = (xp_wild || re_wild
+ || xpath_regexp[re_indx] == xpath[xp_indx]);
+
+ /*
+ * Check if we found a delimiter in both the Xpaths
+ */
+ if ((xpath_regexp[re_indx] == '/'
+ && xpath[xp_indx] == '/')
+ || (xpath_regexp[re_indx] == ']'
+ && xpath[xp_indx] == ']')
+ || (xpath_regexp[re_indx] == '['
+ && xpath[xp_indx] == '[')) {
+ /*
+ * Increment the match count if we have a
+ * new delimiter.
+ */
+ if (match && re_indx && xp_indx && !delim)
+ match_len++;
+ delim = true;
+ } else {
+ delim = false;
+ }
+
+ /*
+ * Proceed to the next character in the RE/XP string as
+ * necessary.
+ */
+ if (!re_wild)
+ re_indx++;
+ if (!xp_wild)
+ xp_indx++;
+ }
+
+ /*
+ * If we finished matching and the last token was a full match
+ * increment the match count appropriately.
+ */
+ if (match && !delim &&
+ (xpath_regexp[re_indx] == '/'
+ || xpath_regexp[re_indx] == ']'))
+ match_len++;
+
+ return match_len;
+}
+
+static void mgmt_be_adapter_disconnect(struct mgmt_be_client_adapter *adapter)
+{
+ if (adapter->conn_fd >= 0) {
+ close(adapter->conn_fd);
+ adapter->conn_fd = -1;
+ }
+
+ /*
+ * Notify about client disconnect for appropriate cleanup
+ */
+ mgmt_txn_notify_be_adapter_conn(adapter, false);
+
+ if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) {
+ mgmt_be_adapters_by_id[adapter->id] = NULL;
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ }
+
+ mgmt_be_adapters_del(&mgmt_be_adapters, adapter);
+
+ mgmt_be_adapter_unlock(&adapter);
+}
+
+static void
+mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_be_client_adapter *old;
+
+ FOREACH_ADAPTER_IN_LIST (old) {
+ if (old != adapter
+ && !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+ /*
+ * We have a Zombie lingering around
+ */
+ MGMTD_BE_ADAPTER_DBG(
+ "Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+ adapter->name, adapter->conn_fd, old->conn_fd);
+ mgmt_be_adapter_disconnect(old);
+ }
+ }
+}
+
+/*
+ * Demultiplex one decoded protobuf message received from a backend
+ * client and forward it to the appropriate module (txn for replies,
+ * local handling for SUBSCR_REQ).
+ *
+ * Always returns 0; errors are handled by disconnecting the adapter.
+ */
+static int
+mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
+			   Mgmtd__BeMessage *be_msg)
+{
+	/*
+	 * protobuf-c adds a max size enum with an internal, and changing by
+	 * version, name; cast to an int to avoid unhandled enum warnings
+	 */
+	switch ((int)be_msg->message_case) {
+	case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+		MGMTD_BE_ADAPTER_DBG(
+			"Got Subscribe Req Msg from '%s' to %sregister %u xpaths",
+			be_msg->subscr_req->client_name,
+			!be_msg->subscr_req->subscribe_xpaths
+					&& be_msg->subscr_req->n_xpath_reg
+				? "de"
+				: "",
+			(uint32_t)be_msg->subscr_req->n_xpath_reg);
+
+		if (strlen(be_msg->subscr_req->client_name)) {
+			strlcpy(adapter->name, be_msg->subscr_req->client_name,
+				sizeof(adapter->name));
+			adapter->id = mgmt_be_client_name2id(adapter->name);
+			if (adapter->id >= MGMTD_BE_CLIENT_ID_MAX) {
+				MGMTD_BE_ADAPTER_ERR(
+					"Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
+					adapter->name);
+				mgmt_be_adapter_disconnect(adapter);
+				/*
+				 * Stop here: 'id' is out of range for
+				 * mgmt_be_adapters_by_id[] and the disconnect
+				 * above may have dropped the last reference
+				 * and freed 'adapter'. Falling through here
+				 * used to write one slot past the array.
+				 */
+				break;
+			}
+			mgmt_be_adapters_by_id[adapter->id] = adapter;
+			mgmt_be_adapter_cleanup_old_conn(adapter);
+		}
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+		MGMTD_BE_ADAPTER_DBG(
+			"Got %s TXN_REPLY Msg for Txn-Id 0x%llx from '%s' with '%s'",
+			be_msg->txn_reply->create ? "Create" : "Delete",
+			(unsigned long long)be_msg->txn_reply->txn_id,
+			adapter->name,
+			be_msg->txn_reply->success ? "success" : "failure");
+		/*
+		 * Forward the TXN_REPLY to txn module.
+		 */
+		mgmt_txn_notify_be_txn_reply(
+			be_msg->txn_reply->txn_id,
+			be_msg->txn_reply->create,
+			be_msg->txn_reply->success, adapter);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+		MGMTD_BE_ADAPTER_DBG(
+			"Got CFGDATA_REPLY Msg from '%s' for Txn-Id 0x%llx Batch-Id 0x%llx with Err:'%s'",
+			adapter->name,
+			(unsigned long long)be_msg->cfg_data_reply->txn_id,
+			(unsigned long long)be_msg->cfg_data_reply->batch_id,
+			be_msg->cfg_data_reply->error_if_any
+				? be_msg->cfg_data_reply->error_if_any
+				: "None");
+		/*
+		 * Forward the CGFData-create reply to txn module.
+		 */
+		mgmt_txn_notify_be_cfgdata_reply(
+			be_msg->cfg_data_reply->txn_id,
+			be_msg->cfg_data_reply->batch_id,
+			be_msg->cfg_data_reply->success,
+			be_msg->cfg_data_reply->error_if_any, adapter);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+		MGMTD_BE_ADAPTER_DBG(
+			"Got %s CFG_APPLY_REPLY Msg from '%s' for Txn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx), Err:'%s'",
+			be_msg->cfg_apply_reply->success ? "successful"
+							 : "failed",
+			adapter->name,
+			(unsigned long long)
+				be_msg->cfg_apply_reply->txn_id,
+			(int)be_msg->cfg_apply_reply->n_batch_ids,
+			(unsigned long long)
+				be_msg->cfg_apply_reply->batch_ids[0],
+			(unsigned long long)be_msg->cfg_apply_reply
+				->batch_ids[be_msg->cfg_apply_reply
+						    ->n_batch_ids
+					    - 1],
+			be_msg->cfg_apply_reply->error_if_any
+				? be_msg->cfg_apply_reply->error_if_any
+				: "None");
+		/*
+		 * Forward the CGFData-apply reply to txn module.
+		 */
+		mgmt_txn_notify_be_cfg_apply_reply(
+			be_msg->cfg_apply_reply->txn_id,
+			be_msg->cfg_apply_reply->success,
+			(uint64_t *)be_msg->cfg_apply_reply->batch_ids,
+			be_msg->cfg_apply_reply->n_batch_ids,
+			be_msg->cfg_apply_reply->error_if_any, adapter);
+		break;
+	case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from MGMTD to
+	 * Backend clients only and/or need not be handled on MGMTd.
+	 */
+	case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+	case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+	case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/* Arm the connection-write event unless writes are currently paused. */
+static inline void
+mgmt_be_adapter_sched_msg_write(struct mgmt_be_client_adapter *adapter)
+{
+	if (CHECK_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF))
+		return;
+
+	mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
+}
+
+/* Un-pause outbound writes and reschedule the write event if needed. */
+static inline void
+mgmt_be_adapter_writes_on(struct mgmt_be_client_adapter *adapter)
+{
+	MGMTD_BE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
+	UNSET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
+	mgmt_be_adapter_sched_msg_write(adapter);
+}
+
+/* Pause outbound writes (e.g. when the socket buffer is full). */
+static inline void
+mgmt_be_adapter_writes_off(struct mgmt_be_client_adapter *adapter)
+{
+	SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
+	MGMTD_BE_ADAPTER_DBG("Pause writing msgs for '%s'", adapter->name);
+}
+
+/*
+ * Pack 'be_msg' into the adapter's outbound stream and make sure a
+ * write event is pending to flush it. Returns the mgmt_msg_send_msg()
+ * result, or -1 when the connection is already closed.
+ */
+static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
+				    Mgmtd__BeMessage *be_msg)
+{
+	int rv;
+
+	if (adapter->conn_fd == -1) {
+		MGMTD_BE_ADAPTER_DBG("can't send message on closed connection");
+		return -1;
+	}
+
+	rv = mgmt_msg_send_msg(
+		&adapter->mstate, be_msg,
+		mgmtd__be_message__get_packed_size(be_msg),
+		(size_t(*)(void *, void *))mgmtd__be_message__pack,
+		mgmt_debug_be);
+	mgmt_be_adapter_sched_msg_write(adapter);
+
+	return rv;
+}
+
+/*
+ * Send a TXN_REQ to the backend client to create (create == true) or
+ * delete (create == false) the transaction identified by txn_id.
+ * The protobuf lives on the stack; mgmt_be_adapter_send_msg() packs it
+ * before returning, so that is safe.
+ */
+static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+				uint64_t txn_id, bool create)
+{
+	Mgmtd__BeMessage be_msg;
+	Mgmtd__BeTxnReq txn_req;
+
+	mgmtd__be_txn_req__init(&txn_req);
+	txn_req.create = create;
+	txn_req.txn_id = txn_id;
+
+	mgmtd__be_message__init(&be_msg);
+	be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ;
+	be_msg.txn_req = &txn_req;
+
+	MGMTD_BE_ADAPTER_DBG(
+		"Sending TXN_REQ message to Backend client '%s' for Txn-Id %llx",
+		adapter->name, (unsigned long long)txn_id);
+
+	return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+/*
+ * Send one batch of config-data create requests to the backend client.
+ * 'end_of_data' tells the client this is the last batch of the txn.
+ * The stack protobuf is packed synchronously by the send call.
+ */
+static int
+mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
+				uint64_t txn_id, uint64_t batch_id,
+				Mgmtd__YangCfgDataReq **cfgdata_reqs,
+				size_t num_reqs, bool end_of_data)
+{
+	Mgmtd__BeMessage be_msg;
+	Mgmtd__BeCfgDataCreateReq cfgdata_req;
+
+	mgmtd__be_cfg_data_create_req__init(&cfgdata_req);
+	cfgdata_req.batch_id = batch_id;
+	cfgdata_req.txn_id = txn_id;
+	cfgdata_req.data_req = cfgdata_reqs;
+	cfgdata_req.n_data_req = num_reqs;
+	cfgdata_req.end_of_data = end_of_data;
+
+	mgmtd__be_message__init(&be_msg);
+	be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ;
+	be_msg.cfg_data_req = &cfgdata_req;
+
+	MGMTD_BE_ADAPTER_DBG(
+		"Sending CFGDATA_CREATE_REQ message to Backend client '%s' for Txn-Id %llx, Batch-Id: %llx",
+		adapter->name, (unsigned long long)txn_id,
+		(unsigned long long)batch_id);
+
+	return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+/*
+ * Ask the backend client to apply all previously-sent (and validated)
+ * config batches of the given transaction.
+ */
+static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+				     uint64_t txn_id)
+{
+	Mgmtd__BeMessage be_msg;
+	Mgmtd__BeCfgDataApplyReq apply_req;
+
+	mgmtd__be_cfg_data_apply_req__init(&apply_req);
+	apply_req.txn_id = txn_id;
+
+	mgmtd__be_message__init(&be_msg);
+	be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ;
+	be_msg.cfg_apply_req = &apply_req;
+
+	MGMTD_BE_ADAPTER_DBG(
+		"Sending CFG_APPLY_REQ message to Backend client '%s' for Txn-Id 0x%llx",
+		adapter->name, (unsigned long long)txn_id);
+
+	return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+/*
+ * mgmt_msg_procbufs() callback: unpack one wire message and dispatch it.
+ * Decode failures are logged and the bytes dropped (no disconnect here).
+ */
+static void mgmt_be_adapter_process_msg(void *user_ctx, uint8_t *data,
+					size_t len)
+{
+	struct mgmt_be_client_adapter *adapter = user_ctx;
+	Mgmtd__BeMessage *be_msg;
+
+	be_msg = mgmtd__be_message__unpack(NULL, len, data);
+	if (!be_msg) {
+		MGMTD_BE_ADAPTER_DBG(
+			"Failed to decode %zu bytes for adapter: %s", len,
+			adapter->name);
+		return;
+	}
+	MGMTD_BE_ADAPTER_DBG("Decoded %zu bytes of message: %u for adapter: %s",
+			     len, be_msg->message_case, adapter->name);
+	(void)mgmt_be_adapter_handle_msg(adapter, be_msg);
+	/* Free the protobuf-c allocated unpacked message. */
+	mgmtd__be_message__free_unpacked(be_msg, NULL);
+}
+
+/*
+ * Timer callback: process buffered inbound messages; reschedule itself
+ * while mgmt_msg_procbufs() reports more work pending.
+ */
+static void mgmt_be_adapter_proc_msgbufs(struct event *thread)
+{
+	struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread);
+
+	if (mgmt_msg_procbufs(&adapter->mstate, mgmt_be_adapter_process_msg,
+			      adapter, mgmt_debug_be))
+		mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
+}
+
+/*
+ * Read event callback: pull bytes off the connection; on disconnect
+ * tear the adapter down, otherwise (re)arm processing and read events.
+ */
+static void mgmt_be_adapter_read(struct event *thread)
+{
+	struct mgmt_be_client_adapter *adapter;
+	enum mgmt_msg_rsched rv;
+
+	adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
+
+	rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd, mgmt_debug_be);
+	if (rv == MSR_DISCONNECT) {
+		/* Adapter may be freed by this call; do not touch it after. */
+		mgmt_be_adapter_disconnect(adapter);
+		return;
+	}
+	if (rv == MSR_SCHED_BOTH)
+		mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
+	mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
+}
+
+/*
+ * Write event callback: flush the outbound stream and act on the
+ * scheduling hint returned by mgmt_msg_write().
+ */
+static void mgmt_be_adapter_write(struct event *thread)
+{
+	struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread);
+	enum mgmt_msg_wsched rv;
+
+	rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd, mgmt_debug_be);
+	switch (rv) {
+	case MSW_SCHED_STREAM:
+		/* More buffered data: keep the write event armed. */
+		mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
+		break;
+	case MSW_DISCONNECT:
+		mgmt_be_adapter_disconnect(adapter);
+		break;
+	case MSW_SCHED_WRITES_OFF:
+		/* Back-pressure: pause writes and resume after a delay. */
+		mgmt_be_adapter_writes_off(adapter);
+		mgmt_be_adapter_register_event(adapter,
+					       MGMTD_BE_CONN_WRITES_ON);
+		break;
+	default:
+		assert(rv == MSW_SCHED_NONE);
+		break;
+	}
+}
+
+/* Timer callback: delay elapsed after back-pressure, resume writing. */
+static void mgmt_be_adapter_resume_writes(struct event *thread)
+{
+	struct mgmt_be_client_adapter *adapter;
+
+	adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
+	assert(adapter && adapter->conn_fd >= 0);
+
+	mgmt_be_adapter_writes_on(adapter);
+}
+
+/*
+ * mgmt_ds_iter_data() callback: for every config node, collect it into
+ * parms->cfg_chgs if the adapter in 'ctx' has subscribed to the node's
+ * xpath. 'ds_ctx' and 'nb_node' are part of the iterator signature but
+ * unused here.
+ */
+static void mgmt_be_iter_and_get_cfg(struct mgmt_ds_ctx *ds_ctx,
+				     char *xpath, struct lyd_node *node,
+				     struct nb_node *nb_node, void *ctx)
+{
+	struct mgmt_be_client_subscr_info subscr_info;
+	struct mgmt_be_get_adapter_config_params *parms;
+	struct mgmt_be_client_adapter *adapter;
+	struct nb_config_cbs *root;
+	uint32_t *seq;
+
+	if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info) != 0) {
+		MGMTD_BE_ADAPTER_ERR(
+			"ERROR: Failed to get subscriber for '%s'", xpath);
+		return;
+	}
+
+	parms = (struct mgmt_be_get_adapter_config_params *)ctx;
+
+	adapter = parms->adapter;
+	/* Skip nodes this backend client did not subscribe to. */
+	if (!subscr_info.xpath_subscr[adapter->id].subscribed)
+		return;
+
+	root = parms->cfg_chgs;
+	seq = &parms->seq;
+	nb_config_diff_created(node, seq, root);
+}
+
+/*
+ * Timer callback fired shortly after a backend client connects: start a
+ * CONFIG transaction to push the relevant running config down to it.
+ */
+static void mgmt_be_adapter_conn_init(struct event *thread)
+{
+	struct mgmt_be_client_adapter *adapter;
+
+	adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
+	assert(adapter && adapter->conn_fd >= 0);
+
+	/*
+	 * Check first if the current session can run a CONFIG
+	 * transaction or not. Reschedule if a CONFIG transaction
+	 * from another session is already in progress.
+	 */
+	if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE) {
+		mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+		return;
+	}
+
+	/*
+	 * Notify TXN module to create a CONFIG transaction and
+	 * download the CONFIGs identified for this new client.
+	 * If the TXN module fails to initiate the CONFIG transaction
+	 * disconnect from the client forcing a reconnect later.
+	 * That should also take care of destroying the adapter.
+	 */
+	if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) {
+		mgmt_be_adapter_disconnect(adapter);
+		adapter = NULL;
+	}
+}
+
+/*
+ * Schedule the event-loop callback corresponding to 'event' for this
+ * adapter. Events not owned by the adapter (server/cfg scheduling)
+ * assert, as they must be posted elsewhere.
+ */
+static void
+mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
+			       enum mgmt_be_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_BE_CONN_INIT:
+		/* Delayed to let the SUBSCR_REQ identify the client first. */
+		event_add_timer_msec(mgmt_be_adapter_tm,
+				     mgmt_be_adapter_conn_init, adapter,
+				     MGMTD_BE_CONN_INIT_DELAY_MSEC,
+				     &adapter->conn_init_ev);
+		break;
+	case MGMTD_BE_CONN_READ:
+		event_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
+			       adapter, adapter->conn_fd, &adapter->conn_read_ev);
+		break;
+	case MGMTD_BE_CONN_WRITE:
+		if (adapter->conn_write_ev)
+			MGMTD_BE_ADAPTER_DBG(
+				"write ready notify already set for client %s",
+				adapter->name);
+		else
+			MGMTD_BE_ADAPTER_DBG(
+				"scheduling write ready notify for client %s",
+				adapter->name);
+		event_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
+				adapter, adapter->conn_fd, &adapter->conn_write_ev);
+		assert(adapter->conn_write_ev);
+		break;
+	case MGMTD_BE_PROC_MSG:
+		tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
+		event_add_timer_tv(mgmt_be_adapter_tm,
+				   mgmt_be_adapter_proc_msgbufs, adapter, &tv,
+				   &adapter->proc_msg_ev);
+		break;
+	case MGMTD_BE_CONN_WRITES_ON:
+		event_add_timer_msec(mgmt_be_adapter_tm,
+				     mgmt_be_adapter_resume_writes, adapter,
+				     MGMTD_BE_MSG_WRITE_DELAY_MSEC,
+				     &adapter->conn_writes_on);
+		break;
+	case MGMTD_BE_SERVER:
+	case MGMTD_BE_SCHED_CFG_PREPARE:
+	case MGMTD_BE_RESCHED_CFG_PREPARE:
+	case MGMTD_BE_SCHED_CFG_APPLY:
+	case MGMTD_BE_RESCHED_CFG_APPLY:
+		assert(!"mgmt_be_adapter_post_event() called incorrectly");
+		break;
+	}
+}
+
+/* Take a reference on the adapter; paired with mgmt_be_adapter_unlock(). */
+void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
+{
+	adapter->refcount++;
+}
+
+/*
+ * Drop a reference; on the last one, cancel all pending events, free the
+ * message buffers and the adapter itself. *adapter is always NULLed.
+ *
+ * NOTE(review): mgmt_be_adapter_disconnect() already removes the adapter
+ * from mgmt_be_adapters before unlocking, so the del here is usually
+ * redundant (harmless only if the list del tolerates non-members) —
+ * confirm against the list implementation.
+ */
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter)
+{
+	assert(*adapter && (*adapter)->refcount);
+
+	(*adapter)->refcount--;
+	if (!(*adapter)->refcount) {
+		mgmt_be_adapters_del(&mgmt_be_adapters, *adapter);
+		EVENT_OFF((*adapter)->conn_init_ev);
+		EVENT_OFF((*adapter)->conn_read_ev);
+		EVENT_OFF((*adapter)->conn_write_ev);
+		EVENT_OFF((*adapter)->conn_writes_on);
+		EVENT_OFF((*adapter)->proc_msg_ev);
+		mgmt_msg_destroy(&(*adapter)->mstate);
+		XFREE(MTYPE_MGMTD_BE_ADPATER, *adapter);
+	}
+
+	*adapter = NULL;
+}
+
+/*
+ * One-time module init: remember the event loop and reset the adapter
+ * list, the by-ID table and the xpath registration map. Safe to call
+ * more than once (subsequent calls are no-ops). Always returns 0.
+ */
+int mgmt_be_adapter_init(struct event_loop *tm)
+{
+	if (!mgmt_be_adapter_tm) {
+		mgmt_be_adapter_tm = tm;
+		memset(mgmt_xpath_map, 0, sizeof(mgmt_xpath_map));
+		mgmt_num_xpath_maps = 0;
+		memset(mgmt_be_adapters_by_id, 0,
+		       sizeof(mgmt_be_adapters_by_id));
+		mgmt_be_adapters_init(&mgmt_be_adapters);
+		mgmt_be_xpath_map_init();
+	}
+
+	return 0;
+}
+
+/* Module teardown: disconnect and release all backend adapters. */
+void mgmt_be_adapter_destroy(void)
+{
+	mgmt_be_cleanup_adapters();
+}
+
+/*
+ * Find or create the adapter for an accepted backend connection, tune
+ * the socket and schedule the initial config download. The adapter is
+ * named "Unknown-FD-<fd>" until the client's SUBSCR_REQ identifies it.
+ */
+struct mgmt_be_client_adapter *
+mgmt_be_create_adapter(int conn_fd, union sockunion *from)
+{
+	struct mgmt_be_client_adapter *adapter = NULL;
+
+	adapter = mgmt_be_find_adapter_by_fd(conn_fd);
+	if (!adapter) {
+		adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
+				  sizeof(struct mgmt_be_client_adapter));
+		assert(adapter);
+
+		adapter->conn_fd = conn_fd;
+		/* No valid client ID until the SUBSCR_REQ arrives. */
+		adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+		memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
+		snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+			 adapter->conn_fd);
+		mgmt_msg_init(&adapter->mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
+			      MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
+			      "BE-adapter");
+		mgmt_be_adapter_lock(adapter);
+
+		mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
+		mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
+
+		RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
+
+		MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
+				     adapter->name);
+	}
+
+	/* Make client socket non-blocking. */
+	set_nonblocking(adapter->conn_fd);
+	setsockopt_so_sendbuf(adapter->conn_fd, MGMTD_SOCKET_BE_SEND_BUF_SIZE);
+	setsockopt_so_recvbuf(adapter->conn_fd, MGMTD_SOCKET_BE_RECV_BUF_SIZE);
+
+	/* Trigger resync of config with the new adapter */
+	mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+
+	return adapter;
+}
+
+/* Look up an adapter by client ID; out-of-range IDs yield NULL. */
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id)
+{
+	if (id >= MGMTD_BE_CLIENT_ID_MAX)
+		return NULL;
+
+	return mgmt_be_adapters_by_id[id];
+}
+
+/* Public wrapper over the file-local name lookup. */
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name)
+{
+	return mgmt_be_find_adapter_by_name(name);
+}
+
+/*
+ * Return (building lazily on first call) the set of config changes that
+ * must be downloaded to this backend client, by walking the datastore
+ * from the root and keeping only nodes the client subscribed to.
+ * Always returns 0; *cfg_chgs points into the adapter.
+ */
+int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+			       struct mgmt_ds_ctx *ds_ctx,
+			       struct nb_config_cbs **cfg_chgs)
+{
+	char base_xpath[] = "/";
+	struct mgmt_be_get_adapter_config_params parms;
+
+	assert(cfg_chgs);
+
+	/* Only rebuild when the cached change set is empty. */
+	if (RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs)) {
+		parms.adapter = adapter;
+		parms.cfg_chgs = &adapter->cfg_chgs;
+		parms.seq = 0;
+
+		mgmt_ds_iter_data(ds_ctx, base_xpath,
+				  mgmt_be_iter_and_get_cfg, (void *)&parms,
+				  false);
+	}
+
+	*cfg_chgs = &adapter->cfg_chgs;
+	return 0;
+}
+
+/* Ask the backend client to create transaction 'txn_id'. */
+int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
+		       uint64_t txn_id)
+{
+	return mgmt_be_send_txn_req(adapter, txn_id, true);
+}
+
+/* Ask the backend client to delete transaction 'txn_id'. */
+int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
+			uint64_t txn_id)
+{
+	return mgmt_be_send_txn_req(adapter, txn_id, false);
+}
+
+/* Public wrapper: forward a config-data batch to the backend client. */
+int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter,
+				     uint64_t txn_id, uint64_t batch_id,
+				     struct mgmt_be_cfgreq *cfg_req,
+				     bool end_of_data)
+{
+	return mgmt_be_send_cfgdata_create_req(
+		adapter, txn_id, batch_id, cfg_req->cfgdata_reqs,
+		cfg_req->num_reqs, end_of_data);
+}
+
+/* Public wrapper: ask the backend client to apply the txn's config. */
+extern int
+mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
+			   uint64_t txn_id)
+{
+	return mgmt_be_send_cfgapply_req(adapter, txn_id);
+}
+
+/*
+ * This function maps a YANG data Xpath to one or more
+ * Backend Clients that should be contacted for various purposes.
+ *
+ * Fills 'subscr_info' with the merged per-client subscription bits of
+ * all deepest-matching xpath registrations. Returns 0 on success, -1
+ * if subscr_info is NULL.
+ */
+int mgmt_be_get_subscr_info_for_xpath(
+	const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
+{
+	int indx, match, max_match = 0, num_reg;
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_subscr_info
+		*reg_maps[array_size(mgmt_xpath_map)] = {0};
+	bool root_xp = false;
+
+	if (!subscr_info)
+		return -1;
+
+	num_reg = 0;
+	memset(subscr_info, 0, sizeof(*subscr_info));
+
+	/* "/" and "/*" denote the root and match every registration. */
+	if (strlen(xpath) <= 2 && xpath[0] == '/'
+	    && (!xpath[1] || xpath[1] == '*')) {
+		root_xp = true;
+	}
+
+	MGMTD_BE_ADAPTER_DBG("XPATH: %s", xpath);
+	for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+		/*
+		 * For Xpaths: '/' and '/ *' all xpath maps should match
+		 * the given xpath.
+		 */
+		if (!root_xp) {
+			match = mgmt_be_eval_regexp_match(
+				mgmt_xpath_map[indx].xpath_regexp, xpath);
+
+			if (!match || match < max_match)
+				continue;
+
+			/* A deeper match supersedes all shallower ones. */
+			if (match > max_match) {
+				num_reg = 0;
+				max_match = match;
+			}
+		}
+
+		reg_maps[num_reg] = &mgmt_xpath_map[indx].be_subscrs;
+		num_reg++;
+	}
+
+	/* Merge the winning registrations per backend client ID. */
+	for (indx = 0; indx < num_reg; indx++) {
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			if (reg_maps[indx]->xpath_subscr[id].subscribed) {
+				MGMTD_BE_ADAPTER_DBG(
+					"Client: %s",
+					mgmt_be_client_id2name(id));
+				memcpy(&subscr_info->xpath_subscr[id],
+				       &reg_maps[indx]->xpath_subscr[id],
+				       sizeof(subscr_info->xpath_subscr[id]));
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* 'show' helper: dump per-adapter connection and I/O counters to vty. */
+void mgmt_be_adapter_status_write(struct vty *vty)
+{
+	struct mgmt_be_client_adapter *adapter;
+
+	vty_out(vty, "MGMTD Backend Adapters\n");
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		vty_out(vty, "  Client: \t\t\t%s\n", adapter->name);
+		vty_out(vty, "    Conn-FD: \t\t\t%d\n", adapter->conn_fd);
+		vty_out(vty, "    Client-Id: \t\t\t%d\n", adapter->id);
+		vty_out(vty, "    Ref-Count: \t\t\t%u\n", adapter->refcount);
+		vty_out(vty, "    Msg-Recvd: \t\t\t%" PRIu64 "\n",
+			adapter->mstate.nrxm);
+		vty_out(vty, "    Bytes-Recvd: \t\t%" PRIu64 "\n",
+			adapter->mstate.nrxb);
+		vty_out(vty, "    Msg-Sent: \t\t\t%" PRIu64 "\n",
+			adapter->mstate.ntxm);
+		vty_out(vty, "    Bytes-Sent: \t\t%" PRIu64 "\n",
+			adapter->mstate.ntxb);
+	}
+	vty_out(vty, "  Total: %d\n",
+		(int)mgmt_be_adapters_count(&mgmt_be_adapters));
+}
+
+/* 'show' helper: dump the xpath registration map per backend client. */
+void mgmt_be_xpath_register_write(struct vty *vty)
+{
+	int indx;
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	union mgmt_be_xpath_subscr_info *subscr;
+
+	vty_out(vty, "MGMTD Backend XPath Registry\n");
+
+	for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+		vty_out(vty, " - XPATH: '%s'\n",
+			mgmt_xpath_map[indx].xpath_regexp);
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			/* Alias the deeply nested member for readability. */
+			subscr = &mgmt_xpath_map[indx]
+					  .be_subscrs.xpath_subscr[id];
+			if (!subscr->subscribed)
+				continue;
+
+			vty_out(vty,
+				"   -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+				mgmt_be_client_id2name(id),
+				subscr->validate_config ? "T" : "F",
+				subscr->notify_config ? "T" : "F",
+				subscr->own_oper_data ? "T" : "F");
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (adapter) {
+				vty_out(vty, "     -- Adapter: %p\n",
+					adapter);
+			}
+		}
+	}
+
+	vty_out(vty, "Total XPath Registries: %u\n", mgmt_num_xpath_maps);
+}
+
+/*
+ * 'show' helper: resolve and dump which backend clients subscribe to
+ * the given xpath, and with which capabilities.
+ */
+void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath)
+{
+	struct mgmt_be_client_subscr_info subscr;
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+
+	if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0) {
+		vty_out(vty, "ERROR: Failed to get subscriber for '%s'\n",
+			xpath);
+		return;
+	}
+
+	vty_out(vty, "XPath: '%s'\n", xpath);
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		if (subscr.xpath_subscr[id].subscribed) {
+			vty_out(vty,
+				"  -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+				mgmt_be_client_id2name(id),
+				subscr.xpath_subscr[id].validate_config ? "T"
+									: "F",
+				subscr.xpath_subscr[id].notify_config ? "T"
+								      : "F",
+				subscr.xpath_subscr[id].own_oper_data ? "T"
+								      : "F");
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (adapter)
+				vty_out(vty, "  -- Adapter: %p\n", adapter);
+		}
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_BE_ADAPTER_H_
+#define _FRR_MGMTD_BE_ADAPTER_H_
+
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_BE_CONN_INIT_DELAY_MSEC 50
+
+#define MGMTD_FIND_ADAPTER_BY_INDEX(adapter_index) \
+ mgmt_adaptr_ref[adapter_index]
+
+/* Kinds of requests MGMTD can issue to a backend client. */
+enum mgmt_be_req_type {
+	MGMTD_BE_REQ_NONE = 0,
+	MGMTD_BE_REQ_CFG_VALIDATE,
+	MGMTD_BE_REQ_CFG_APPLY,
+	MGMTD_BE_REQ_DATA_GET_ELEM,
+	MGMTD_BE_REQ_DATA_GET_NEXT
+};
+
+/* A batch of config-data requests destined for one backend client. */
+struct mgmt_be_cfgreq {
+	Mgmtd__YangCfgDataReq **cfgdata_reqs;
+	size_t num_reqs;
+};
+
+/* A batch of operational get-data requests for one backend client. */
+struct mgmt_be_datareq {
+	Mgmtd__YangGetDataReq **getdata_reqs;
+	size_t num_reqs;
+};
+
+PREDECL_LIST(mgmt_be_adapters);
+PREDECL_LIST(mgmt_txn_badapters);
+
+/* Per-connection state for one backend client attached to MGMTD. */
+struct mgmt_be_client_adapter {
+	enum mgmt_be_client_id id;   /* MGMTD_BE_CLIENT_ID_MAX until known */
+	int conn_fd;                 /* -1 once disconnected */
+	union sockunion conn_su;
+	struct event *conn_init_ev;  /* delayed initial config download */
+	struct event *conn_read_ev;
+	struct event *conn_write_ev;
+	struct event *conn_writes_on; /* timer to resume paused writes */
+	struct event *proc_msg_ev;    /* deferred inbound-msg processing */
+	uint32_t flags;               /* MGMTD_BE_ADAPTER_FLAGS_* */
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+	uint8_t num_xpath_reg;
+	char xpath_reg[MGMTD_MAX_NUM_XPATH_REG][MGMTD_MAX_XPATH_LEN];
+
+	/* IO streams for read and write */
+	struct mgmt_msg_state mstate;
+
+	int refcount;
+
+	/*
+	 * List of config items that should be sent to the
+	 * backend during re/connect. This is temporarily
+	 * created and then freed-up as soon as the initial
+	 * config items has been applied onto the backend.
+	 */
+	struct nb_config_cbs cfg_chgs;
+
+	struct mgmt_be_adapters_item list_linkage;
+	struct mgmt_txn_badapters_item txn_list_linkage;
+};
+
+#define MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
+#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 1)
+
+DECLARE_LIST(mgmt_be_adapters, struct mgmt_be_client_adapter, list_linkage);
+DECLARE_LIST(mgmt_txn_badapters, struct mgmt_be_client_adapter,
+ txn_list_linkage);
+
+/*
+ * Per-client subscription bits for one xpath registration. 'subscribed'
+ * overlays the bitfield so a non-zero value means "any bit set".
+ */
+union mgmt_be_xpath_subscr_info {
+	uint8_t subscribed;
+	struct {
+		uint8_t validate_config : 1;
+		uint8_t notify_config : 1;
+		uint8_t own_oper_data : 1;
+	};
+};
+
+struct mgmt_be_client_subscr_info {
+ union mgmt_be_xpath_subscr_info xpath_subscr[MGMTD_BE_CLIENT_ID_MAX];
+};
+
+/* Initialise backend adapter module. */
+extern int mgmt_be_adapter_init(struct event_loop *tm);
+
+/* Destroy the backend adapter module. */
+extern void mgmt_be_adapter_destroy(void);
+
+/* Acquire lock for backend adapter. */
+extern void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter);
+
+/* Remove lock from backend adapter. */
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter);
+
+/* Create backend adapter. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch backend adapter given an adapter name. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name);
+
+/* Fetch backend adapter given an client ID. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id);
+
+/* Fetch backend adapter config. */
+extern int
+mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct nb_config_cbs **cfg_chgs);
+
+/* Create a transaction. */
+extern int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
+
+/* Destroy a transaction. */
+extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
+
+/*
+ * Send config data create request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * batch_id
+ * Request batch ID.
+ *
+ * cfg_req
+ * Config data request.
+ *
+ * end_of_data
+ * TRUE if the data from last batch, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfg_data_create_req(
+ struct mgmt_be_client_adapter *adapter, uint64_t txn_id,
+ uint64_t batch_id, struct mgmt_be_cfgreq *cfg_req, bool end_of_data);
+
+/*
+ * Send config validate request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * batch_ids
+ * List of request batch IDs.
+ *
+ * num_batch_ids
+ * Number of batch ids.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int
+mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_ids[],
+ size_t num_batch_ids);
+
+/*
+ * Send config apply request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int
+mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
+
+/*
+ * Dump backend adapter status to vty.
+ */
+extern void mgmt_be_adapter_status_write(struct vty *vty);
+
+/*
+ * Dump xpath registry for each backend client to vty.
+ */
+extern void mgmt_be_xpath_register_write(struct vty *vty);
+
+/*
+ * Maps a YANG data Xpath to one or more
+ * backend clients that should be contacted for various purposes.
+ */
+extern int mgmt_be_get_subscr_info_for_xpath(
+ const char *xpath, struct mgmt_be_client_subscr_info *subscr_info);
+
+/*
+ * Dump backend client information for a given xpath to vty.
+ */
+extern void mgmt_be_xpath_subscr_info_write(struct vty *vty,
+ const char *xpath);
+
+#endif /* _FRR_MGMTD_BE_ADAPTER_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_SRVR_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_SRVR_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_SRVR_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_be) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_BE_SRVR_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_be_listen_fd = -1;
+static struct event_loop *mgmt_be_listen_tm;
+static struct event *mgmt_be_listen_ev;
+static void mgmt_be_server_register_event(enum mgmt_be_event event);
+
+/*
+ * Accept-event callback: accept one pending backend client connection
+ * and hand it to the adapter layer. Always re-arms itself first so the
+ * listen socket keeps being serviced.
+ */
+static void mgmt_be_conn_accept(struct event *thread)
+{
+	int client_conn_fd;
+	union sockunion su;
+
+	if (mgmt_be_listen_fd < 0)
+		return;
+
+	/* We continue hearing server listen socket. */
+	mgmt_be_server_register_event(MGMTD_BE_SERVER);
+
+	memset(&su, 0, sizeof(union sockunion));
+
+	/* We can handle IPv4 or IPv6 socket. */
+	client_conn_fd = sockunion_accept(mgmt_be_listen_fd, &su);
+	if (client_conn_fd < 0) {
+		MGMTD_BE_SRVR_ERR(
+			"Failed to accept MGMTD Backend client connection : %s",
+			safe_strerror(errno));
+		return;
+	}
+	set_nonblocking(client_conn_fd);
+	set_cloexec(client_conn_fd);
+
+	MGMTD_BE_SRVR_DBG("Got a new MGMTD Backend connection");
+
+	mgmt_be_create_adapter(client_conn_fd, &su);
+}
+
+/*
+ * (Re)arm the accept event on the listen socket. Only MGMTD_BE_SERVER
+ * is valid here; anything else is a programming error.
+ */
+static void mgmt_be_server_register_event(enum mgmt_be_event event)
+{
+	if (event == MGMTD_BE_SERVER) {
+		event_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
+			       NULL, mgmt_be_listen_fd,
+			       &mgmt_be_listen_ev);
+		assert(mgmt_be_listen_ev);
+	} else {
+		assert(!"mgmt_be_server_post_event() called incorrectly");
+	}
+}
+
+/*
+ * Create, bind and listen on the MGMTD backend unix-domain server
+ * socket and register the accept event. Exits the process on failure,
+ * since mgmtd cannot operate without the backend server socket.
+ * 'hostname' is currently unused (the socket path is fixed).
+ */
+static void mgmt_be_server_start(const char *hostname)
+{
+	int ret;
+	int sock;
+	struct sockaddr_un addr;
+	mode_t old_mask;
+
+	/* Set umask */
+	old_mask = umask(0077);
+
+	sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+	if (sock < 0) {
+		MGMTD_BE_SRVR_ERR("Failed to create server socket: %s",
+				  safe_strerror(errno));
+		goto mgmt_be_server_start_failed;
+	}
+
+	/* Zero the whole sockaddr (padding included) before binding. */
+	memset(&addr, 0, sizeof(addr));
+	addr.sun_family = AF_UNIX;
+	strlcpy(addr.sun_path, MGMTD_BE_SERVER_PATH, sizeof(addr.sun_path));
+	/* Remove any stale socket file from a previous run. */
+	unlink(addr.sun_path);
+	ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0) {
+		MGMTD_BE_SRVR_ERR(
+			"Failed to bind server socket to '%s'. Err: %s",
+			addr.sun_path, safe_strerror(errno));
+		goto mgmt_be_server_start_failed;
+	}
+
+	ret = listen(sock, MGMTD_BE_MAX_CONN);
+	if (ret < 0) {
+		MGMTD_BE_SRVR_ERR("Failed to listen on server socket: %s",
+				  safe_strerror(errno));
+		goto mgmt_be_server_start_failed;
+	}
+
+	/* Restore umask */
+	umask(old_mask);
+
+	mgmt_be_listen_fd = sock;
+	mgmt_be_server_register_event(MGMTD_BE_SERVER);
+
+	MGMTD_BE_SRVR_DBG("Started MGMTD Backend Server!");
+	return;
+
+mgmt_be_server_start_failed:
+	/*
+	 * 'sock' is -1 when socket() itself failed; the old 'if (sock)'
+	 * test would call close(-1) here (and would skip closing fd 0).
+	 */
+	if (sock >= 0)
+		close(sock);
+
+	mgmt_be_listen_fd = -1;
+	exit(-1);
+}
+
+/*
+ * Initialise the backend server on the given event loop; idempotent.
+ * Always returns 0 (failure inside mgmt_be_server_start() exits).
+ */
+int mgmt_be_server_init(struct event_loop *master)
+{
+	if (mgmt_be_listen_tm) {
+		MGMTD_BE_SRVR_DBG("MGMTD Backend Server already running!");
+		return 0;
+	}
+
+	mgmt_be_listen_tm = master;
+
+	mgmt_be_server_start("localhost");
+
+	return 0;
+}
+
+/* Cancel the accept event, close the listen socket and reset state. */
+void mgmt_be_server_destroy(void)
+{
+	if (mgmt_be_listen_tm) {
+		MGMTD_BE_SRVR_DBG("Closing MGMTD Backend Server!");
+
+		if (mgmt_be_listen_ev) {
+			EVENT_OFF(mgmt_be_listen_ev);
+			mgmt_be_listen_ev = NULL;
+		}
+
+		if (mgmt_be_listen_fd >= 0) {
+			close(mgmt_be_listen_fd);
+			mgmt_be_listen_fd = -1;
+		}
+
+		mgmt_be_listen_tm = NULL;
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#ifndef _FRR_MGMTD_BE_SERVER_H_
+#define _FRR_MGMTD_BE_SERVER_H_
+
+#define MGMTD_BE_MAX_CONN 32
+
+/* Initialise backend server */
+extern int mgmt_be_server_init(struct event_loop *master);
+
+/* Destroy backend server */
+extern void mgmt_be_server_destroy(void);
+
+#endif /* _FRR_MGMTD_BE_SERVER_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD public defines.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DEFINES_H
+#define _FRR_MGMTD_DEFINES_H
+
+#include "yang.h"
+
+#define MGMTD_CLIENT_NAME_MAX_LEN 32
+
+#define MGMTD_MAX_XPATH_LEN XPATH_MAXLEN
+
+#define MGMTD_MAX_YANG_VALUE_LEN YANG_VALUE_MAXLEN
+
+#define MGMTD_MAX_NUM_XPATH_REG 128
+
+#define MGMTD_MAX_NUM_DATA_REQ_IN_BATCH 32
+#define MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH 8
+
+/* Common result codes returned across MGMTD modules. */
+enum mgmt_result {
+	MGMTD_SUCCESS = 0,
+	MGMTD_INVALID_PARAM,
+	MGMTD_INTERNAL_ERROR,
+	MGMTD_NO_CFG_CHANGES,
+	MGMTD_DS_LOCK_FAILED,
+	MGMTD_DS_UNLOCK_FAILED,
+	MGMTD_UNKNOWN_FAILURE
+};
+
+/* Event types scheduled by the frontend server/adapter code. */
+enum mgmt_fe_event {
+	MGMTD_FE_SERVER = 1,
+	MGMTD_FE_CONN_READ,
+	MGMTD_FE_CONN_WRITE,
+	MGMTD_FE_CONN_WRITES_ON,
+	MGMTD_FE_PROC_MSG
+};
+
+/* Event types scheduled by the backend server/adapter/txn code. */
+enum mgmt_be_event {
+	MGMTD_BE_SERVER = 1,
+	MGMTD_BE_CONN_INIT,
+	MGMTD_BE_CONN_READ,
+	MGMTD_BE_CONN_WRITE,
+	MGMTD_BE_CONN_WRITES_ON,
+	MGMTD_BE_PROC_MSG,
+	MGMTD_BE_SCHED_CFG_PREPARE,
+	MGMTD_BE_RESCHED_CFG_PREPARE,
+	MGMTD_BE_SCHED_CFG_APPLY,
+	MGMTD_BE_RESCHED_CFG_APPLY,
+};
+
+#define MGMTD_TXN_ID_NONE 0
+
+#define MGMTD_TXN_BATCH_ID_NONE 0
+
+#endif /* _FRR_MGMTD_DEFINES_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "libyang/libyang.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_DS_DBG(fmt, ...)                                                 \
+	fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_DS_ERR(fmt, ...)                                                 \
+	fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+/*
+ * Debug output belongs at debug level: this macro previously called
+ * zlog_err(), unlike the matching MGMTD_BE_* debug macros which use
+ * zlog_debug(), flooding the error log whenever DS debugging was on.
+ */
+#define MGMTD_DS_DBG(fmt, ...)                                                 \
+	do {                                                                   \
+		if (mgmt_debug_ds)                                             \
+			zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);       \
+	} while (0)
+#define MGMTD_DS_ERR(fmt, ...)                                                 \
+	zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
/* Runtime context for one MGMTD datastore (running/candidate/operational). */
struct mgmt_ds_ctx {
	Mgmtd__DatastoreId ds_id;
	int lock; /* 0 = unlocked, >0 = read-lock count, <0 = write-locked */

	bool config_ds; /* true => root.cfg_root is valid, else root.dnode_root */

	union {
		struct nb_config *cfg_root;  /* config datastores */
		struct lyd_node *dnode_root; /* operational datastore */
	} root;
};
+
/* Datastore names, indexed by Mgmtd__DatastoreId (see mgmt_ds_id2name()). */
const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1] = {
	MGMTD_DS_NAME_NONE,	   /* MGMTD_DS_NONE */
	MGMTD_DS_NAME_RUNNING,	   /* MGMTD_DS_RUNNING */
	MGMTD_DS_NAME_CANDIDATE,   /* MGMTD_DS_CANDIDATE */
	MGMTD_DS_NAME_OPERATIONAL, /* MGMTD_DS_OPERATIONAL */
	"Unknown/Invalid",	   /* MGMTD_DS_ID_MAX */
};

/* Set once by mgmt_ds_init(); the three DS contexts live for process life. */
static struct mgmt_master *mgmt_ds_mm;
static struct mgmt_ds_ctx running, candidate, oper;
+
+/* Dump the data tree of the specified format in the file pointed by the path */
+static int mgmt_ds_dump_in_memory(struct mgmt_ds_ctx *ds_ctx,
+ const char *base_xpath, LYD_FORMAT format,
+ struct ly_out *out)
+{
+ struct lyd_node *root;
+ uint32_t options = 0;
+
+ if (base_xpath[0] == '\0')
+ root = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root;
+ else
+ root = yang_dnode_get(ds_ctx->config_ds
+ ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ base_xpath);
+ if (!root)
+ return -1;
+
+ options = ds_ctx->config_ds ? LYD_PRINT_WD_TRIM :
+ LYD_PRINT_WD_EXPLICIT;
+
+ if (base_xpath[0] == '\0')
+ lyd_print_all(out, root, format, options);
+ else
+ lyd_print_tree(out, root, format, options);
+
+ return 0;
+}
+
+static int mgmt_ds_replace_dst_with_src_ds(struct mgmt_ds_ctx *src,
+ struct mgmt_ds_ctx *dst)
+{
+ struct lyd_node *dst_dnode, *src_dnode;
+ struct ly_out *out;
+
+ if (!src || !dst)
+ return -1;
+ MGMTD_DS_DBG("Replacing %d with %d", dst->ds_id, src->ds_id);
+
+ src_dnode = src->config_ds ? src->root.cfg_root->dnode
+ : dst->root.dnode_root;
+ dst_dnode = dst->config_ds ? dst->root.cfg_root->dnode
+ : dst->root.dnode_root;
+
+ if (dst_dnode)
+ yang_dnode_free(dst_dnode);
+
+ /* Not using nb_config_replace as the oper ds does not contain nb_config
+ */
+ dst_dnode = yang_dnode_dup(src_dnode);
+ if (dst->config_ds)
+ dst->root.cfg_root->dnode = dst_dnode;
+ else
+ dst->root.dnode_root = dst_dnode;
+
+ if (src->ds_id == MGMTD_DS_CANDIDATE) {
+ /*
+ * Drop the changes in scratch-buffer.
+ */
+ MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+ nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+ }
+
+ if (dst->ds_id == MGMTD_DS_RUNNING) {
+ if (ly_out_new_filepath(MGMTD_STARTUP_DS_FILE_PATH, &out)
+ == LY_SUCCESS)
+ mgmt_ds_dump_in_memory(dst, "", LYD_JSON, out);
+ ly_out_free(out, NULL, 0);
+ }
+
+ /* TODO: Update the versions if nb_config present */
+
+ return 0;
+}
+
+static int mgmt_ds_merge_src_with_dst_ds(struct mgmt_ds_ctx *src,
+ struct mgmt_ds_ctx *dst)
+{
+ int ret;
+ struct lyd_node **dst_dnode, *src_dnode;
+ struct ly_out *out;
+
+ if (!src || !dst)
+ return -1;
+
+ MGMTD_DS_DBG("Merging DS %d with %d", dst->ds_id, src->ds_id);
+
+ src_dnode = src->config_ds ? src->root.cfg_root->dnode
+ : dst->root.dnode_root;
+ dst_dnode = dst->config_ds ? &dst->root.cfg_root->dnode
+ : &dst->root.dnode_root;
+ ret = lyd_merge_siblings(dst_dnode, src_dnode, 0);
+ if (ret != 0) {
+ MGMTD_DS_ERR("lyd_merge() failed with err %d", ret);
+ return ret;
+ }
+
+ if (src->ds_id == MGMTD_DS_CANDIDATE) {
+ /*
+ * Drop the changes in scratch-buffer.
+ */
+ MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+ nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+ }
+
+ if (dst->ds_id == MGMTD_DS_RUNNING) {
+ if (ly_out_new_filepath(MGMTD_STARTUP_DS_FILE_PATH, &out)
+ == LY_SUCCESS)
+ mgmt_ds_dump_in_memory(dst, "", LYD_JSON, out);
+ ly_out_free(out, NULL, 0);
+ }
+
+ return 0;
+}
+
+static int mgmt_ds_load_cfg_from_file(const char *filepath,
+ struct lyd_node **dnode)
+{
+ LY_ERR ret;
+
+ *dnode = NULL;
+ ret = lyd_parse_data_path(ly_native_ctx, filepath, LYD_JSON,
+ LYD_PARSE_STRICT, 0, dnode);
+
+ if (ret != LY_SUCCESS) {
+ if (*dnode)
+ yang_dnode_free(*dnode);
+ return -1;
+ }
+
+ return 0;
+}
+
+void mgmt_ds_reset_candidate(void)
+{
+ struct lyd_node *dnode = mm->candidate_ds->root.cfg_root->dnode;
+ if (dnode)
+ yang_dnode_free(dnode);
+
+ dnode = yang_dnode_new(ly_native_ctx, true);
+ mm->candidate_ds->root.cfg_root->dnode = dnode;
+}
+
+
/*
 * One-time datastore initialization. Must run exactly once, after frr_init()
 * has created 'running_config'. Wires the three static DS contexts into the
 * mgmt_master and redirects the lib/vty candidate config to our candidate DS.
 */
int mgmt_ds_init(struct mgmt_master *mm)
{
	struct lyd_node *root;

	if (mgmt_ds_mm || mm->running_ds || mm->candidate_ds || mm->oper_ds)
		assert(!"MGMTD: Call ds_init only once!");

	/* Use Running DS from NB module??? */
	if (!running_config)
		assert(!"MGMTD: Call ds_init after frr_init only!");

	/* If a startup file exists, it replaces the global running config. */
	if (mgmt_ds_load_cfg_from_file(MGMTD_STARTUP_DS_FILE_PATH, &root)
	    == 0) {
		nb_config_free(running_config);
		running_config = nb_config_new(root);
	}

	/* Running DS shares the northbound global running_config tree. */
	running.root.cfg_root = running_config;
	running.config_ds = true;
	running.ds_id = MGMTD_DS_RUNNING;

	/* Candidate starts as an independent copy of running. */
	candidate.root.cfg_root = nb_config_dup(running.root.cfg_root);
	candidate.config_ds = true;
	candidate.ds_id = MGMTD_DS_CANDIDATE;

	/*
	 * Redirect lib/vty candidate-config datastore to the global candidate
	 * config Ds on the MGMTD process.
	 */
	vty_mgmt_candidate_config = candidate.root.cfg_root;

	/* Operational DS is a plain (non-nb_config) libyang tree. */
	oper.root.dnode_root = yang_dnode_new(ly_native_ctx, true);
	oper.config_ds = false;
	oper.ds_id = MGMTD_DS_OPERATIONAL;

	mm->running_ds = &running;
	mm->candidate_ds = &candidate;
	mm->oper_ds = &oper;
	mgmt_ds_mm = mm;

	return 0;
}
+
/* Tear down the datastores (currently a stub). */
void mgmt_ds_destroy(void)
{
	/* TODO: Free the datastores. */
}
+
+struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+ Mgmtd__DatastoreId ds_id)
+{
+ switch (ds_id) {
+ case MGMTD_DS_CANDIDATE:
+ return (mm->candidate_ds);
+ case MGMTD_DS_RUNNING:
+ return (mm->running_ds);
+ case MGMTD_DS_OPERATIONAL:
+ return (mm->oper_ds);
+ case MGMTD_DS_NONE:
+ case MGMTD__DATASTORE_ID__STARTUP_DS:
+ case _MGMTD__DATASTORE_ID_IS_INT_SIZE:
+ return 0;
+ }
+
+ return 0;
+}
+
+bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return false;
+
+ return ds_ctx->config_ds;
+}
+
+int mgmt_ds_read_lock(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return EINVAL;
+ if (ds_ctx->lock < 0)
+ return EBUSY;
+ ++ds_ctx->lock;
+ return 0;
+}
+
+int mgmt_ds_write_lock(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return EINVAL;
+ if (ds_ctx->lock != 0)
+ return EBUSY;
+ ds_ctx->lock = -1;
+ return 0;
+}
+
/*
 * Release one lock on the datastore: decrements the read count, or clears
 * the write lock. Unlocking an already-unlocked DS hits an always-false
 * assert (deliberate abort on debug builds) and returns EINVAL otherwise.
 */
int mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx)
{
	if (!ds_ctx)
		return EINVAL;
	if (ds_ctx->lock > 0)
		--ds_ctx->lock; /* drop one shared read lock */
	else if (ds_ctx->lock < 0) {
		/* Write lock is exclusive, so -1 is the only valid value. */
		assert(ds_ctx->lock == -1);
		ds_ctx->lock = 0;
	} else {
		/* lock == 0 here, so this assert always fires. */
		assert(ds_ctx->lock != 0);
		return EINVAL;
	}
	return 0;
}
+
+int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx, bool updt_cmt_rec)
+{
+ if (mgmt_ds_replace_dst_with_src_ds(src_ds_ctx, dst_ds_ctx) != 0)
+ return -1;
+
+ if (updt_cmt_rec && dst_ds_ctx->ds_id == MGMTD_DS_RUNNING)
+ mgmt_history_new_record(dst_ds_ctx);
+
+ return 0;
+}
+
+int mgmt_ds_dump_ds_to_file(char *file_name, struct mgmt_ds_ctx *ds_ctx)
+{
+ struct ly_out *out;
+ int ret = 0;
+
+ if (ly_out_new_filepath(file_name, &out) == LY_SUCCESS) {
+ ret = mgmt_ds_dump_in_memory(ds_ctx, "", LYD_JSON, out);
+ ly_out_free(out, NULL, 0);
+ }
+
+ return ret;
+}
+
+struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return NULL;
+
+ return ds_ctx->config_ds ? ds_ctx->root.cfg_root : NULL;
+}
+
/*
 * Recursively walk the datastore tree under 'base_xpath' (or 'base_dnode'
 * if already resolved), invoking 'mgmt_ds_node_iter_fn' on each node.
 *
 * xpaths/num_nodes: optional caller-provided array to collect xpaths and
 * an in/out capacity counter (in: max entries, out: entries produced).
 * childs_as_well: descend into children when true.
 * alloc_xp_copy: pass a strdup'ed xpath to the iterator (caller frees).
 *
 * Returns 0 on success, -1 if the base node cannot be resolved.
 */
static int mgmt_walk_ds_nodes(
	struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
	struct lyd_node *base_dnode,
	void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
				     struct lyd_node *node,
				     struct nb_node *nb_node, void *ctx),
	void *ctx, char *xpaths[], int *num_nodes, bool childs_as_well,
	bool alloc_xp_copy)
{
	uint32_t indx;
	char *xpath, *xpath_buf, *iter_xp;
	int ret, num_left = 0, num_found = 0;
	struct lyd_node *dnode;
	struct nb_node *nbnode;
	bool alloc_xp = false;

	if (xpaths)
		assert(num_nodes);

	if (num_nodes && !*num_nodes)
		return 0;

	if (num_nodes) {
		num_left = *num_nodes;
		MGMTD_DS_DBG(" -- START: num_left:%d", num_left);
		*num_nodes = 0;
	}

	MGMTD_DS_DBG(" -- START: Base: %s", base_xpath);

	/* Resolve the base node lazily if the caller passed only an xpath. */
	if (!base_dnode)
		base_dnode = yang_dnode_get(
			ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
					  : ds_ctx->root.dnode_root,
			base_xpath);
	if (!base_dnode)
		return -1;

	if (mgmt_ds_node_iter_fn) {
		/*
		 * In case the caller is interested in getting a copy
		 * of the xpath for themselves (by setting
		 * 'alloc_xp_copy' to 'true') we make a copy for the
		 * caller and pass it. Else we pass the original xpath
		 * buffer.
		 *
		 * NOTE: In such case caller will have to take care of
		 * the copy later.
		 */
		iter_xp = alloc_xp_copy ? strdup(base_xpath) : base_xpath;

		nbnode = (struct nb_node *)base_dnode->schema->priv;
		(*mgmt_ds_node_iter_fn)(ds_ctx, iter_xp, base_dnode, nbnode,
					ctx);
	}

	if (num_nodes) {
		(*num_nodes)++;
		num_left--;
	}

	/*
	 * If the base_xpath points to a leaf node, or we don't need to
	 * visit any children we can skip the tree walk.
	 */
	if (!childs_as_well || base_dnode->schema->nodetype & LYD_NODE_TERM)
		return 0;

	indx = 0;
	LY_LIST_FOR (lyd_child(base_dnode), dnode) {
		assert(dnode->schema && dnode->schema->priv);

		xpath = NULL;
		if (xpaths) {
			/* Allocate a slot on demand; reuse pre-allocated
			 * caller slots otherwise. */
			if (!xpaths[*num_nodes]) {
				alloc_xp = true;
				xpaths[*num_nodes] =
					(char *)calloc(1, MGMTD_MAX_XPATH_LEN);
			}
			xpath = lyd_path(dnode, LYD_PATH_STD,
					 xpaths[*num_nodes],
					 MGMTD_MAX_XPATH_LEN);
		} else {
			alloc_xp = true;
			xpath_buf = (char *)calloc(1, MGMTD_MAX_XPATH_LEN);
			(void) lyd_path(dnode, LYD_PATH_STD, xpath_buf,
					MGMTD_MAX_XPATH_LEN);
			xpath = xpath_buf;
		}

		assert(xpath);
		MGMTD_DS_DBG(" -- XPATH: %s", xpath);

		if (num_nodes)
			num_found = num_left;

		ret = mgmt_walk_ds_nodes(ds_ctx, xpath, dnode,
					 mgmt_ds_node_iter_fn, ctx,
					 xpaths ? &xpaths[*num_nodes] : NULL,
					 num_nodes ? &num_found : NULL,
					 childs_as_well, alloc_xp_copy);

		if (num_nodes) {
			num_left -= num_found;
			(*num_nodes) += num_found;
		}

		/* NOTE(review): 'alloc_xp' is never reset to false between
		 * iterations, so once set it also frees xpaths[] entries
		 * allocated in earlier iterations (leaving dangling pointers
		 * in the caller's array) — verify intended behavior. */
		if (alloc_xp)
			free(xpath);

		if (ret != 0)
			break;

		indx++;
	}


	if (num_nodes) {
		MGMTD_DS_DBG(" -- END: *num_nodes:%d, num_left:%d", *num_nodes,
			     num_left);
	}

	return 0;
}
+
+int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath,
+ char *dxpaths[], int *num_nodes,
+ bool get_childs_as_well, bool alloc_xp_copy)
+{
+ char base_xpath[MGMTD_MAX_XPATH_LEN];
+
+ if (!ds_ctx || !num_nodes)
+ return -1;
+
+ if (xpath[0] == '.' && xpath[1] == '/')
+ xpath += 2;
+
+ strlcpy(base_xpath, xpath, sizeof(base_xpath));
+ mgmt_remove_trailing_separator(base_xpath, '/');
+
+ return (mgmt_walk_ds_nodes(ds_ctx, base_xpath, NULL, NULL, NULL,
+ dxpaths, num_nodes, get_childs_as_well,
+ alloc_xp_copy));
+}
+
+struct lyd_node *mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath)
+{
+ if (!ds_ctx)
+ return NULL;
+
+ return yang_dnode_get(ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ xpath);
+}
+
+int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath)
+{
+ struct nb_node *nb_node;
+ struct lyd_node *dnode, *dep_dnode;
+ char dep_xpath[XPATH_MAXLEN];
+
+ if (!ds_ctx)
+ return -1;
+
+ nb_node = nb_node_find(xpath);
+
+ dnode = yang_dnode_get(ds_ctx->config_ds
+ ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ xpath);
+
+ if (!dnode)
+ /*
+ * Return a special error code so the caller can choose
+ * whether to ignore it or not.
+ */
+ return NB_ERR_NOT_FOUND;
+ /* destroy dependant */
+ if (nb_node->dep_cbs.get_dependant_xpath) {
+ nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);
+
+ dep_dnode = yang_dnode_get(
+ ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ dep_xpath);
+ if (dep_dnode)
+ lyd_free_tree(dep_dnode);
+ }
+ lyd_free_tree(dnode);
+
+ return 0;
+}
+
+int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *dst,
+ const char *file_path, bool merge)
+{
+ struct lyd_node *iter;
+ struct mgmt_ds_ctx parsed;
+
+ if (!dst)
+ return -1;
+
+ if (mgmt_ds_load_cfg_from_file(file_path, &iter) != 0) {
+ MGMTD_DS_ERR("Failed to load config from the file %s",
+ file_path);
+ return -1;
+ }
+
+ parsed.root.cfg_root = nb_config_new(iter);
+ parsed.config_ds = true;
+ parsed.ds_id = dst->ds_id;
+
+ if (merge)
+ mgmt_ds_merge_src_with_dst_ds(&parsed, dst);
+ else
+ mgmt_ds_replace_dst_with_src_ds(&parsed, dst);
+
+ nb_config_free(parsed.root.cfg_root);
+
+ return 0;
+}
+
+int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx,
+ char *xpath,
+ struct lyd_node *node,
+ struct nb_node *nb_node,
+ void *ctx),
+ void *ctx, bool alloc_xp_copy)
+{
+ int ret;
+ char xpath[MGMTD_MAX_XPATH_LEN];
+ struct lyd_node *base_dnode = NULL;
+ struct lyd_node *node;
+
+ if (!ds_ctx)
+ return -1;
+
+ mgmt_remove_trailing_separator(base_xpath, '/');
+
+ strlcpy(xpath, base_xpath, sizeof(xpath));
+
+ MGMTD_DS_DBG(" -- START DS walk for DSid: %d", ds_ctx->ds_id);
+
+ /* If the base_xpath is empty then crawl the sibblings */
+ if (xpath[0] == '\0') {
+ base_dnode = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root;
+
+ /* get first top-level sibling */
+ while (base_dnode->parent)
+ base_dnode = lyd_parent(base_dnode);
+
+ while (base_dnode->prev->next)
+ base_dnode = base_dnode->prev;
+
+ LY_LIST_FOR (base_dnode, node) {
+ ret = mgmt_walk_ds_nodes(
+ ds_ctx, xpath, node, mgmt_ds_node_iter_fn,
+ ctx, NULL, NULL, true, alloc_xp_copy);
+ }
+ } else
+ ret = mgmt_walk_ds_nodes(ds_ctx, xpath, base_dnode,
+ mgmt_ds_node_iter_fn, ctx, NULL, NULL,
+ true, alloc_xp_copy);
+
+ return ret;
+}
+
+void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, FILE *f, LYD_FORMAT format)
+{
+ struct ly_out *out;
+ char *str;
+ char base_xpath[MGMTD_MAX_XPATH_LEN] = {0};
+
+ if (!ds_ctx) {
+ vty_out(vty, " >>>>> Datastore Not Initialized!\n");
+ return;
+ }
+
+ if (xpath) {
+ strlcpy(base_xpath, xpath, MGMTD_MAX_XPATH_LEN);
+ mgmt_remove_trailing_separator(base_xpath, '/');
+ }
+
+ if (f)
+ ly_out_new_file(f, &out);
+ else
+ ly_out_new_memory(&str, 0, &out);
+
+ mgmt_ds_dump_in_memory(ds_ctx, base_xpath, format, out);
+
+ if (!f)
+ vty_out(vty, "%s\n", str);
+
+ ly_out_free(out, NULL, 0);
+}
+
+void mgmt_ds_status_write_one(struct vty *vty, struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx) {
+ vty_out(vty, " >>>>> Datastore Not Initialized!\n");
+ return;
+ }
+
+ vty_out(vty, " DS: %s\n", mgmt_ds_id2name(ds_ctx->ds_id));
+ vty_out(vty, " DS-Hndl: \t\t\t%p\n", ds_ctx);
+ vty_out(vty, " Config: \t\t\t%s\n",
+ ds_ctx->config_ds ? "True" : "False");
+}
+
+void mgmt_ds_status_write(struct vty *vty)
+{
+ vty_out(vty, "MGMTD Datastores\n");
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->running_ds);
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->candidate_ds);
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->oper_ds);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DS_H_
+#define _FRR_MGMTD_DS_H_
+
+#include "mgmt_fe_client.h"
+#include "northbound.h"
+
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#define MGMTD_MAX_NUM_DSNODES_PER_BATCH 128
+
+#define MGMTD_DS_NAME_MAX_LEN 32
+#define MGMTD_DS_NAME_NONE "none"
+#define MGMTD_DS_NAME_RUNNING "running"
+#define MGMTD_DS_NAME_CANDIDATE "candidate"
+#define MGMTD_DS_NAME_OPERATIONAL "operational"
+
+#define MGMTD_STARTUP_DS_FILE_PATH DAEMON_DB_DIR "/frr_startup.json"
+
+#define FOREACH_MGMTD_DS_ID(id) \
+ for ((id) = MGMTD_DS_NONE; (id) < MGMTD_DS_MAX_ID; (id)++)
+
+#define MGMTD_MAX_COMMIT_LIST 10
+#define MGMTD_MD5_HASH_LEN 16
+#define MGMTD_MD5_HASH_STR_HEX_LEN 33
+
+#define MGMTD_COMMIT_FILE_PATH DAEMON_DB_DIR "/commit-%s.json"
+#define MGMTD_COMMIT_INDEX_FILE_NAME DAEMON_DB_DIR "/commit-index.dat"
+#define MGMTD_COMMIT_TIME_STR_LEN 100
+
+extern struct nb_config *running_config;
+
+struct mgmt_ds_ctx;
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1];
+
+/*
+ * Convert datastore ID to datastore name.
+ *
+ * id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore name.
+ */
+static inline const char *mgmt_ds_id2name(Mgmtd__DatastoreId id)
+{
+ if (id > MGMTD_DS_MAX_ID)
+ id = MGMTD_DS_MAX_ID;
+ return mgmt_ds_names[id];
+}
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * id
+ * Datastore name.
+ *
+ * Returns:
+ * Datastore ID.
+ */
+static inline Mgmtd__DatastoreId mgmt_ds_name2id(const char *name)
+{
+ Mgmtd__DatastoreId id;
+
+ FOREACH_MGMTD_DS_ID (id) {
+ if (!strncmp(mgmt_ds_names[id], name, MGMTD_DS_NAME_MAX_LEN))
+ return id;
+ }
+
+ return MGMTD_DS_NONE;
+}
+
+/*
+ * Convert datastore ID to datastore name.
+ *
+ * similar to above funtion.
+ */
+static inline Mgmtd__DatastoreId mgmt_get_ds_id_by_name(const char *ds_name)
+{
+ if (!strncmp(ds_name, "candidate", sizeof("candidate")))
+ return MGMTD_DS_CANDIDATE;
+ else if (!strncmp(ds_name, "running", sizeof("running")))
+ return MGMTD_DS_RUNNING;
+ else if (!strncmp(ds_name, "operational", sizeof("operational")))
+ return MGMTD_DS_OPERATIONAL;
+ return MGMTD_DS_NONE;
+}
+
+/*
+ * Appends trail wildcard '/' '*' to a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * path_len
+ * xpath length.
+ */
+static inline void mgmt_xpath_append_trail_wildcard(char *xpath,
+ size_t *xpath_len)
+{
+ if (!xpath || !xpath_len)
+ return;
+
+ if (!*xpath_len)
+ *xpath_len = strlen(xpath);
+
+ if (*xpath_len > 2 && *xpath_len < MGMTD_MAX_XPATH_LEN - 2) {
+ if (xpath[*xpath_len - 1] == '/') {
+ xpath[*xpath_len] = '*';
+ xpath[*xpath_len + 1] = 0;
+ (*xpath_len)++;
+ } else if (xpath[*xpath_len - 1] != '*') {
+ xpath[*xpath_len] = '/';
+ xpath[*xpath_len + 1] = '*';
+ xpath[*xpath_len + 2] = 0;
+ (*xpath_len) += 2;
+ }
+ }
+}
+
+/*
+ * Removes trail wildcard '/' '*' from a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * path_len
+ * xpath length.
+ */
static inline void mgmt_xpath_remove_trail_wildcard(char *xpath,
						    size_t *xpath_len)
{
	size_t len;

	if (!xpath || !xpath_len)
		return;

	if (!*xpath_len)
		*xpath_len = strlen(xpath);
	len = *xpath_len;

	/* Strip a trailing "/*" pair if present. */
	if (len > 2 && xpath[len - 2] == '/' && xpath[len - 1] == '*') {
		xpath[len - 2] = '\0';
		*xpath_len = len - 2;
	}
}
+
+/* Initialise datastore */
+extern int mgmt_ds_init(struct mgmt_master *cm);
+
+/* Destroy datastore */
+extern void mgmt_ds_destroy(void);
+
+/*
+ * Get datastore handler by ID
+ *
+ * mm
+ * Management master structure.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore context (Holds info about ID, lock, root node etc).
+ */
+extern struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+ Mgmtd__DatastoreId ds_id);
+
+/*
+ * Check if a given datastore is config ds
+ */
+extern bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Acquire read lock to a ds given a ds_handle
+ */
+extern int mgmt_ds_read_lock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Acquire write lock to a ds given a ds_handle
+ */
+extern int mgmt_ds_write_lock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Remove a lock from ds given a ds_handle
+ */
+extern int mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Copy from source to destination datastore.
+ *
+ * src_ds
+ * Source datastore handle (ds to be copied from).
+ *
+ * dst_ds
+ * Destination datastore handle (ds to be copied to).
+ *
 * update_cmt_rec
 *    TRUE if need to update commit record, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx,
+ bool update_cmt_rec);
+
+/*
+ * Fetch northbound configuration for a given datastore context.
+ */
+extern struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Lookup YANG data nodes.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * xpath
+ * YANG base xpath.
+ *
+ * dxpaths
+ * Out param - array of YANG data xpaths.
+ *
+ * num_nodes
+ * In-out param - number of YANG data xpaths.
+ * Note - Caller should init this to the size of the array
+ * provided in dxpaths.
+ * On return this will have the actual number of xpaths
+ * being returned.
+ *
+ * get_childs_as_well
+ * TRUE if child nodes needs to be fetched as well, FALSE otherwise.
+ *
+ * alloc_xp_copy
+ * TRUE if the caller is interested in getting a copy of the xpath.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, char *dxpaths[],
+ int *num_nodes, bool get_childs_as_well,
+ bool alloc_xp_copy);
+
+/*
+ * Find YANG data node given a datastore handle YANG xpath.
+ */
+extern struct lyd_node *
+mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Delete YANG data node given a datastore handle and YANG xpath.
+ */
+extern int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Iterate over datastore data.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * base_xpath
+ * Base YANG xpath from where needs to be iterated.
+ *
+ * iter_fn
+ * function that will be called during each iteration.
+ *
+ * ctx
+ * User defined opaque value normally used to pass
+ * reference to some user private context that will
+ * be passed to the iterator function provided in
+ * 'iter_fn'.
+ *
+ * alloc_xp_copy
+ * TRUE if the caller is interested in getting a copy of the xpath.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_iter_data(
+ struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
+ struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx),
+ void *ctx, bool alloc_xp_copy);
+
+/*
+ * Load config to datastore from a file.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * file_path
+ * File path of the configuration file.
+ *
+ * merge
+ * TRUE if you want to merge with existing config,
+ * FALSE if you want to replace with existing config
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *ds_ctx,
+ const char *file_path, bool merge);
+
+/*
+ * Dump the data tree to a file with JSON/XML format.
+ *
+ * vty
+ * VTY context.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * xpath
+ * Base YANG xpath from where data needs to be dumped.
+ *
+ * f
+ * File pointer to where data to be dumped.
+ *
+ * format
+ * JSON/XML
+ */
+extern void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, FILE *f, LYD_FORMAT format);
+
+/*
+ * Dump the complete data tree to a file with JSON format.
+ *
+ * file_name
+ * File path to where data to be dumped.
+ *
+ * ds
+ * Datastore context.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_dump_ds_to_file(char *file_name,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about specific datastore.
+ */
+extern void mgmt_ds_status_write_one(struct vty *vty,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about all the datastores.
+ */
+extern void mgmt_ds_status_write(struct vty *vty);
+
+
+/*
+ * Reset the candidate DS to empty state
+ */
+void mgmt_ds_reset_candidate(void);
+
+#endif /* _FRR_MGMTD_DS_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "hash.h"
+#include "jhash.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_fe) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
+
/* Deferred per-session cleanup events scheduled on the event loop. */
enum mgmt_session_event {
	MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1, /* clean up a config transaction */
	MGMTD_FE_SESSION_SHOW_TXN_CLNUP,    /* clean up a show transaction */
};
+
/* State for one frontend client session on an adapter connection. */
struct mgmt_fe_session_ctx {
	struct mgmt_fe_client_adapter *adapter; /* owning connection */
	uint64_t session_id; /* server-assigned, hash key */
	uint64_t client_id;  /* client-assigned, unique per adapter */
	uint64_t txn_id;     /* current show txn, MGMTD_TXN_ID_NONE if none */
	uint64_t cfg_txn_id; /* current config txn, MGMTD_TXN_ID_NONE if none */
	uint8_t ds_write_locked[MGMTD_DS_MAX_ID]; /* per-DS write-lock flags */
	uint8_t ds_read_locked[MGMTD_DS_MAX_ID];  /* per-DS read-lock flags */
	uint8_t ds_locked_implict[MGMTD_DS_MAX_ID]; /* locks taken implicitly */
	struct event *proc_cfg_txn_clnp;  /* scheduled config-txn cleanup */
	struct event *proc_show_txn_clnp; /* scheduled show-txn cleanup */

	struct mgmt_fe_sessions_item list_linkage;
};
+
+DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
+
+#define FOREACH_SESSION_IN_LIST(adapter, session) \
+ frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
+
+static struct event_loop *mgmt_fe_adapter_tm;
+static struct mgmt_master *mgmt_fe_adapter_mm;
+
+static struct mgmt_fe_adapters_head mgmt_fe_adapters;
+
+static struct hash *mgmt_fe_sessions;
+static uint64_t mgmt_fe_next_session_id;
+
+/* Forward declarations */
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+ enum mgmt_fe_event event);
+static void
+mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter);
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+ enum mgmt_session_event event);
+
+static int
+mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct mgmt_fe_session_ctx *session)
+{
+ if (!session->ds_write_locked[ds_id]) {
+ if (mgmt_ds_write_lock(ds_ctx) != 0) {
+ MGMTD_FE_ADAPTER_DBG(
+ "Failed to lock the DS %u for Sessn: %p from %s!",
+ ds_id, session, session->adapter->name);
+ return -1;
+ }
+
+ session->ds_write_locked[ds_id] = true;
+ MGMTD_FE_ADAPTER_DBG(
+ "Write-Locked the DS %u for Sessn: %p from %s!", ds_id,
+ session, session->adapter->name);
+ }
+
+ return 0;
+}
+
+static int
+mgmt_fe_session_read_lock_ds(Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct mgmt_fe_session_ctx *session)
+{
+ if (!session->ds_read_locked[ds_id]) {
+ if (mgmt_ds_read_lock(ds_ctx) != 0) {
+ MGMTD_FE_ADAPTER_DBG(
+ "Failed to lock the DS %u for Sessn: %p from %s!",
+ ds_id, session, session->adapter->name);
+ return -1;
+ }
+
+ session->ds_read_locked[ds_id] = true;
+ MGMTD_FE_ADAPTER_DBG(
+ "Read-Locked the DS %u for Sessn: %p from %s!", ds_id,
+ session, session->adapter->name);
+ }
+
+ return 0;
+}
+
+static int mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct mgmt_fe_session_ctx *session,
+ bool unlock_write, bool unlock_read)
+{
+ if (unlock_write && session->ds_write_locked[ds_id]) {
+ session->ds_write_locked[ds_id] = false;
+ session->ds_locked_implict[ds_id] = false;
+ if (mgmt_ds_unlock(ds_ctx) != 0) {
+ MGMTD_FE_ADAPTER_DBG(
+ "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+ ds_id, session, session->adapter->name);
+ return -1;
+ }
+
+ MGMTD_FE_ADAPTER_DBG(
+ "Unlocked DS %u write-locked earlier by Sessn: %p from %s",
+ ds_id, session, session->adapter->name);
+ } else if (unlock_read && session->ds_read_locked[ds_id]) {
+ session->ds_read_locked[ds_id] = false;
+ session->ds_locked_implict[ds_id] = false;
+ if (mgmt_ds_unlock(ds_ctx) != 0) {
+ MGMTD_FE_ADAPTER_DBG(
+ "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+ ds_id, session, session->adapter->name);
+ return -1;
+ }
+
+ MGMTD_FE_ADAPTER_DBG(
+ "Unlocked DS %u read-locked earlier by Sessn: %p from %s",
+ ds_id, session, session->adapter->name);
+ }
+
+ return 0;
+}
+
+static void
+mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+
+ /*
+ * Ensure any uncommitted changes in Candidate DS
+ * is discarded.
+ */
+ mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+
+ for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+ ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+ if (ds_ctx) {
+ if (session->ds_locked_implict[ds_id])
+ mgmt_fe_session_unlock_ds(
+ ds_id, ds_ctx, session, true, false);
+ }
+ }
+
+ /*
+ * Destroy the actual transaction created earlier.
+ */
+ if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+ mgmt_destroy_txn(&session->cfg_txn_id);
+}
+
+static void
+mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+
+ for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+ ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+ if (ds_ctx) {
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session,
+ false, true);
+ }
+ }
+
+ /*
+ * Destroy the transaction created recently.
+ */
+ if (session->txn_id != MGMTD_TXN_ID_NONE)
+ mgmt_destroy_txn(&session->txn_id);
+}
+
+static void
+mgmt_fe_adapter_compute_set_cfg_timers(struct mgmt_setcfg_stats *setcfg_stats)
+{
+ setcfg_stats->last_exec_tm = timeval_elapsed(setcfg_stats->last_end,
+ setcfg_stats->last_start);
+ if (setcfg_stats->last_exec_tm > setcfg_stats->max_tm)
+ setcfg_stats->max_tm = setcfg_stats->last_exec_tm;
+
+ if (setcfg_stats->last_exec_tm < setcfg_stats->min_tm)
+ setcfg_stats->min_tm = setcfg_stats->last_exec_tm;
+
+ setcfg_stats->avg_tm =
+ (((setcfg_stats->avg_tm * (setcfg_stats->set_cfg_count - 1))
+ + setcfg_stats->last_exec_tm)
+ / setcfg_stats->set_cfg_count);
+}
+
+static void
+mgmt_fe_session_compute_commit_timers(struct mgmt_commit_stats *cmt_stats)
+{
+ cmt_stats->last_exec_tm =
+ timeval_elapsed(cmt_stats->last_end, cmt_stats->last_start);
+ if (cmt_stats->last_exec_tm > cmt_stats->max_tm) {
+ cmt_stats->max_tm = cmt_stats->last_exec_tm;
+ cmt_stats->max_batch_cnt = cmt_stats->last_batch_cnt;
+ }
+
+ if (cmt_stats->last_exec_tm < cmt_stats->min_tm) {
+ cmt_stats->min_tm = cmt_stats->last_exec_tm;
+ cmt_stats->min_batch_cnt = cmt_stats->last_batch_cnt;
+ }
+}
+
/*
 * Fully tear down a session: abort its transactions, release its DS locks,
 * detach it from the adapter, then remove it from the session hash and
 * free it. NULLs the caller's pointer.
 */
static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **session)
{
	if ((*session)->adapter) {
		/* Abort/finish any in-flight transactions first. */
		mgmt_fe_session_cfg_txn_cleanup((*session));
		mgmt_fe_session_show_txn_cleanup((*session));
		/* Release any remaining explicit candidate/running locks. */
		mgmt_fe_session_unlock_ds(MGMTD_DS_CANDIDATE,
					  mgmt_fe_adapter_mm->candidate_ds,
					  *session, true, true);
		mgmt_fe_session_unlock_ds(MGMTD_DS_RUNNING,
					  mgmt_fe_adapter_mm->running_ds,
					  *session, true, true);

		mgmt_fe_sessions_del(&(*session)->adapter->fe_sessions,
				     *session);
		/* Drop the reference taken at session creation. */
		mgmt_fe_adapter_unlock(&(*session)->adapter);
	}

	hash_release(mgmt_fe_sessions, *session);
	XFREE(MTYPE_MGMTD_FE_SESSION, *session);
	*session = NULL;
}
+
+static struct mgmt_fe_session_ctx *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
+ uint64_t client_id)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ FOREACH_SESSION_IN_LIST (adapter, session) {
+ if (session->client_id == client_id)
+ return session;
+ }
+
+ return NULL;
+}
+
+static unsigned int mgmt_fe_session_hash_key(const void *data)
+{
+ const struct mgmt_fe_session_ctx *session = data;
+
+ return jhash2((uint32_t *) &session->session_id,
+ sizeof(session->session_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
+{
+ const struct mgmt_fe_session_ctx *session1 = d1;
+ const struct mgmt_fe_session_ctx *session2 = d2;
+
+ return (session1->session_id == session2->session_id);
+}
+
/* hash_clean() callback: fully tear down one session entry. */
static void mgmt_fe_session_hash_free(void *data)
{
	struct mgmt_fe_session_ctx *session = data;

	mgmt_fe_cleanup_session(&session);
}
+
+/*
+ * Destroy the global session hash, cleaning up any sessions still in
+ * it.  Safe to call when the hash was never created.
+ */
+static void mgmt_fe_session_hash_destroy(void)
+{
+	if (mgmt_fe_sessions == NULL)
+		return;
+
+	hash_clean(mgmt_fe_sessions,
+		   mgmt_fe_session_hash_free);
+	hash_free(mgmt_fe_sessions);
+	mgmt_fe_sessions = NULL;
+}
+
+/*
+ * Resolve a session-id (as carried in frontend messages) to its session
+ * context via the global hash.  Returns NULL when the hash does not
+ * exist yet or the id is unknown — callers must handle NULL.
+ */
+static inline struct mgmt_fe_session_ctx *
+mgmt_session_id2ctx(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx key = {0};
+	struct mgmt_fe_session_ctx *session;
+
+	if (!mgmt_fe_sessions)
+		return NULL;
+
+	key.session_id = session_id;
+	session = hash_lookup(mgmt_fe_sessions, &key);
+
+	return session;
+}
+
+/*
+ * Create a new session for 'client_id' on the given adapter.  If a
+ * session with the same client id already exists it is destroyed first
+ * (the client is re-creating it).  Takes an adapter reference, assigns
+ * a fresh non-zero session_id and inserts the session into the global
+ * hash.
+ */
+static struct mgmt_fe_session_ctx *
+mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
+			   uint64_t client_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_fe_find_session_by_client_id(adapter, client_id);
+	if (session)
+		mgmt_fe_cleanup_session(&session);
+
+	session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+			sizeof(struct mgmt_fe_session_ctx));
+	assert(session);
+	session->client_id = client_id;
+	session->adapter = adapter;
+	session->txn_id = MGMTD_TXN_ID_NONE;
+	session->cfg_txn_id = MGMTD_TXN_ID_NONE;
+	mgmt_fe_adapter_lock(adapter);
+	mgmt_fe_sessions_add_tail(&adapter->fe_sessions, session);
+	/* Skip 0 on wrap: 0 is reserved for "no session". */
+	if (!mgmt_fe_next_session_id)
+		mgmt_fe_next_session_id++;
+	session->session_id = mgmt_fe_next_session_id++;
+	hash_get(mgmt_fe_sessions, session, hash_alloc_intern);
+
+	return session;
+}
+
+/*
+ * Destroy every session attached to the adapter (used on disconnect
+ * and global shutdown).
+ */
+static void
+mgmt_fe_cleanup_sessions(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	FOREACH_SESSION_IN_LIST (adapter, session)
+		mgmt_fe_cleanup_session(&session);
+}
+
+/*
+ * Schedule a write event on the adapter connection unless writes are
+ * currently paused (WRITES_OFF flag set for flow control).
+ */
+static inline void
+mgmt_fe_adapter_sched_msg_write(struct mgmt_fe_client_adapter *adapter)
+{
+	if (!CHECK_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF))
+		mgmt_fe_adapter_register_event(adapter,
+						 MGMTD_FE_CONN_WRITE);
+}
+
+/* Resume sending: clear the flow-control flag and reschedule writes. */
+static inline void
+mgmt_fe_adapter_writes_on(struct mgmt_fe_client_adapter *adapter)
+{
+	MGMTD_FE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
+	UNSET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+	mgmt_fe_adapter_sched_msg_write(adapter);
+}
+
+/* Pause sending: set the flow-control flag; writes resume via writes_on(). */
+static inline void
+mgmt_fe_adapter_writes_off(struct mgmt_fe_client_adapter *adapter)
+{
+	SET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+	MGMTD_FE_ADAPTER_DBG("Paused writing msgs for '%s'", adapter->name);
+}
+
+/*
+ * Serialize a frontend protobuf message onto the adapter's outbound
+ * stream and schedule the write event.  Returns -1 if the connection is
+ * already closed, otherwise the result of mgmt_msg_send_msg().
+ */
+static int
+mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+			     Mgmtd__FeMessage *fe_msg)
+{
+	if (adapter->conn_fd == -1) {
+		MGMTD_FE_ADAPTER_DBG("can't send message on closed connection");
+		return -1;
+	}
+
+	/* Cast adapts the protobuf-c pack signature to the generic one. */
+	int rv = mgmt_msg_send_msg(
+		&adapter->mstate, fe_msg,
+		mgmtd__fe_message__get_packed_size(fe_msg),
+		(size_t(*)(void *, void *))mgmtd__fe_message__pack,
+		mgmt_debug_fe);
+	mgmt_fe_adapter_sched_msg_write(adapter);
+	return rv;
+}
+
+/*
+ * Send a SESSION_REPLY for a session create/destroy request.  On create
+ * the client's own connection id is echoed back so the client can match
+ * the reply to its request.
+ */
+static int
+mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+			       struct mgmt_fe_session_ctx *session,
+			       bool create, bool success)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSessionReply session_reply;
+
+	mgmtd__fe_session_reply__init(&session_reply);
+	session_reply.create = create;
+	if (create) {
+		session_reply.has_client_conn_id = 1;
+		session_reply.client_conn_id = session->client_id;
+	}
+	session_reply.session_id = session->session_id;
+	session_reply.success = success;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY;
+	fe_msg.session_reply = &session_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
+		adapter->name);
+
+	return mgmt_fe_adapter_send_msg(adapter, &fe_msg);
+}
+
+/*
+ * Send a LOCK_DS_REPLY to the session's client.  'error_if_any' (may be
+ * NULL) carries a human-readable failure reason; the cast is safe since
+ * the message is packed before this function returns.
+ */
+static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+					 Mgmtd__DatastoreId ds_id,
+					 uint64_t req_id, bool lock_ds,
+					 bool success, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeLockDsReply lockds_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_lock_ds_reply__init(&lockds_reply);
+	lockds_reply.session_id = session->session_id;
+	lockds_reply.ds_id = ds_id;
+	lockds_reply.req_id = req_id;
+	lockds_reply.lock = lock_ds;
+	lockds_reply.success = success;
+	if (error_if_any)
+		lockds_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY;
+	fe_msg.lockds_reply = &lockds_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a SET_CONFIG_REPLY to the session's client.  For an implicit
+ * commit the CONFIG transaction cleanup is deferred to a timer event
+ * and the commit timing stats are finalized here; set-config timing
+ * stats are always finalized.
+ */
+static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
+					 Mgmtd__DatastoreId ds_id,
+					 uint64_t req_id, bool success,
+					 const char *error_if_any,
+					 bool implicit_commit)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSetConfigReply setcfg_reply;
+
+	assert(session->adapter);
+
+	/* Non-zero cfg_txn_id means a CONFIG transaction is active. */
+	if (implicit_commit && session->cfg_txn_id)
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	mgmtd__fe_set_config_reply__init(&setcfg_reply);
+	setcfg_reply.session_id = session->session_id;
+	setcfg_reply.ds_id = ds_id;
+	setcfg_reply.req_id = req_id;
+	setcfg_reply.success = success;
+	if (error_if_any)
+		setcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY;
+	fe_msg.setcfg_reply = &setcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	if (implicit_commit) {
+		if (mm->perf_stats_en)
+			gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+		mgmt_fe_session_compute_commit_timers(
+			&session->adapter->cmt_stats);
+	}
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
+	mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a COMMIT_CONFIG_REPLY to the session's client.  Success covers
+ * both a real commit and "no changes to commit".  On a completed
+ * (non-validate-only) or no-change commit the CONFIG transaction
+ * cleanup is deferred to a timer event, and commit timers are
+ * finalized.
+ */
+static int mgmt_fe_send_commitcfg_reply(
+	struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
+	Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
+	bool validate_only, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeCommitConfigReply commcfg_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_commit_config_reply__init(&commcfg_reply);
+	commcfg_reply.session_id = session->session_id;
+	commcfg_reply.src_ds_id = src_ds_id;
+	commcfg_reply.dst_ds_id = dst_ds_id;
+	commcfg_reply.req_id = req_id;
+	commcfg_reply.success =
+		(result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES)
+			? true
+			: false;
+	commcfg_reply.validate_only = validate_only;
+	if (error_if_any)
+		commcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY;
+	fe_msg.commcfg_reply = &commcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending COMMIT_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the CONFIG transaction associated with this session.
+	 */
+	if (session->cfg_txn_id
+	    && ((result == MGMTD_SUCCESS && !validate_only)
+		|| (result == MGMTD_NO_CFG_CHANGES)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+	mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a GET_CONFIG_REPLY carrying 'data' (may be NULL on failure).
+ * When the request failed, or the reply is the final batch
+ * (next_indx < 0), the SHOW transaction cleanup is deferred to a
+ * timer event.
+ */
+static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
+					 Mgmtd__DatastoreId ds_id,
+					 uint64_t req_id, bool success,
+					 Mgmtd__YangDataReply *data,
+					 const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetConfigReply getcfg_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_get_config_reply__init(&getcfg_reply);
+	getcfg_reply.session_id = session->session_id;
+	getcfg_reply.ds_id = ds_id;
+	getcfg_reply.req_id = req_id;
+	getcfg_reply.success = success;
+	getcfg_reply.data = data;
+	if (error_if_any)
+		getcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY;
+	fe_msg.getcfg_reply = &getcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending GET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the SHOW transaction associated with this session.
+	 */
+	if (session->txn_id && (!success || (data && data->next_indx < 0)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a GET_DATA_REPLY carrying 'data' (may be NULL on failure).
+ * Mirrors mgmt_fe_send_getcfg_reply(): on failure or on the final
+ * batch (next_indx < 0) the SHOW transaction cleanup is deferred to a
+ * timer event.
+ */
+static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__DatastoreId ds_id,
+					  uint64_t req_id, bool success,
+					  Mgmtd__YangDataReply *data,
+					  const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetDataReply getdata_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_get_data_reply__init(&getdata_reply);
+	getdata_reply.session_id = session->session_id;
+	getdata_reply.ds_id = ds_id;
+	getdata_reply.req_id = req_id;
+	getdata_reply.success = success;
+	getdata_reply.data = data;
+	if (error_if_any)
+		getdata_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY;
+	fe_msg.getdata_reply = &getdata_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending GET_DATA_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the SHOW transaction associated with this session.
+	 */
+	if (session->txn_id && (!success || (data && data->next_indx < 0)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/* Timer callback: deferred cleanup of a session's CONFIG transaction. */
+static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = (struct mgmt_fe_session_ctx *)EVENT_ARG(thread);
+
+	mgmt_fe_session_cfg_txn_cleanup(session);
+}
+
+/* Timer callback: deferred cleanup of a session's SHOW transaction. */
+static void mgmt_fe_session_show_txn_clnup(struct event *thread)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = (struct mgmt_fe_session_ctx *)EVENT_ARG(thread);
+
+	mgmt_fe_session_show_txn_cleanup(session);
+}
+
+/*
+ * Schedule a short-delay timer for a per-session event (CONFIG or SHOW
+ * transaction cleanup).  The delay lets the current reply message go
+ * out before the transaction is torn down.
+ */
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+				   enum mgmt_session_event event)
+{
+	struct timeval tv = {.tv_sec = 0,
+			     .tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC};
+
+	switch (event) {
+	case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
+		event_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_session_cfg_txn_clnup, session,
+				    &tv, &session->proc_cfg_txn_clnp);
+		break;
+	case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
+		event_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_session_show_txn_clnup, session,
+				    &tv, &session->proc_show_txn_clnp);
+		break;
+	}
+}
+
+/* Find a connected adapter by its socket fd; NULL if none matches. */
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_fd(int conn_fd)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		if (adapter->conn_fd == conn_fd)
+			return adapter;
+	}
+
+	return NULL;
+}
+
+/* Find a connected adapter by its client name; NULL if none matches. */
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_name(const char *name)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+			return adapter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Disconnect and dismantle an adapter: close the socket, destroy all of
+ * its sessions, unlink it from the global adapter list and drop the
+ * list's reference (which may free the adapter).
+ */
+static void mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter)
+{
+	if (adapter->conn_fd >= 0) {
+		close(adapter->conn_fd);
+		adapter->conn_fd = -1;
+	}
+
+	/* TODO: notify about client disconnect for appropriate cleanup */
+	mgmt_fe_cleanup_sessions(adapter);
+	mgmt_fe_sessions_fini(&adapter->fe_sessions);
+	mgmt_fe_adapters_del(&mgmt_fe_adapters, adapter);
+
+	mgmt_fe_adapter_unlock(&adapter);
+}
+
+/*
+ * After a client registers its name, drop any older adapter that still
+ * carries the same name — the client has reconnected and the old
+ * connection is a zombie.
+ */
+static void
+mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_client_adapter *old;
+
+	FOREACH_ADAPTER_IN_LIST (old) {
+		if (old != adapter
+		    && !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+			/*
+			 * We have a Zombie lingering around
+			 */
+			MGMTD_FE_ADAPTER_DBG(
+				"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+				adapter->name, adapter->conn_fd, old->conn_fd);
+			mgmt_fe_adapter_disconnect(old);
+		}
+	}
+}
+
+/*
+ * Shutdown helper: destroy all sessions of every adapter and drop one
+ * reference on each adapter.
+ */
+static void
+mgmt_fe_cleanup_adapters(void)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		mgmt_fe_cleanup_sessions(adapter);
+		mgmt_fe_adapter_unlock(&adapter);
+	}
+}
+
+/*
+ * Handle a LOCK_DS_REQ from a frontend session.  Only the Candidate DS
+ * may be explicitly locked/unlocked.  All outcomes (success and the
+ * various failures) are reported to the client via LOCK_DS_REPLY.
+ */
+static int
+mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeLockDsReq *lockds_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Next check first if the SET_CONFIG_REQ is for Candidate DS
+	 * or not. Report failure if its not. MGMTD currently only
+	 * supports editing the Candidate DS.
+	 */
+	if (lockds_req->ds_id != MGMTD_DS_CANDIDATE) {
+		mgmt_fe_send_lockds_reply(
+			session, lockds_req->ds_id, lockds_req->req_id,
+			lockds_req->lock, false,
+			"Lock/Unlock on datastores other than Candidate DS not permitted!");
+		return -1;
+	}
+
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, lockds_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_lockds_reply(
+			session, lockds_req->ds_id, lockds_req->req_id,
+			lockds_req->lock, false,
+			"Failed to retrieve handle for DS!");
+		return -1;
+	}
+
+	if (lockds_req->lock) {
+		if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id,
+						      ds_ctx, session)
+		    != 0) {
+			mgmt_fe_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock already taken on DS by another session!");
+			return -1;
+		}
+
+		/* Explicit lock: mark as not implicitly taken. */
+		session->ds_locked_implict[lockds_req->ds_id] = false;
+	} else {
+		if (!session->ds_write_locked[lockds_req->ds_id]) {
+			mgmt_fe_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock on DS was not taken by this session!");
+			return 0;
+		}
+
+		(void)mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx,
+						    session, true, false);
+	}
+
+	if (mgmt_fe_send_lockds_reply(session, lockds_req->ds_id,
+					  lockds_req->req_id, lockds_req->lock,
+					  true, NULL)
+	    != 0) {
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to send LOCK_DS_REPLY for DS %u Sessn: %p from %s",
+			lockds_req->ds_id, session, session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a SET_CONFIG_REQ from a frontend session.
+ *
+ * Only the Candidate DS may be edited.  On the first SET_CONFIG of a
+ * session the DS write-lock is taken (implicitly, if not already held)
+ * and a CONFIG transaction is created; subsequent requests reuse it.
+ * The request is then forwarded to the transaction module.  All errors
+ * are reported to the client via SET_CONFIG_REPLY; the return value is
+ * always 0 for the message-dispatch loop.
+ */
+static int
+mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeSetConfigReq *setcfg_req)
+{
+	uint64_t cfg_session_id;
+	struct mgmt_ds_ctx *ds_ctx, *dst_ds_ctx;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
+
+	/*
+	 * Next check first if the SET_CONFIG_REQ is for Candidate DS
+	 * or not. Report failure if its not. MGMTD currently only
+	 * supports editing the Candidate DS.
+	 */
+	if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
+		mgmt_fe_send_setcfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"Set-Config on datastores other than Candidate DS not permitted!",
+			setcfg_req->implicit_commit);
+		return 0;
+	}
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, setcfg_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_setcfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"No such DS exists!", setcfg_req->implicit_commit);
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Check first if the current session can run a CONFIG
+		 * transaction or not. Report failure if a CONFIG transaction
+		 * from another session is already in progress.
+		 */
+		cfg_session_id = mgmt_config_txn_in_progress();
+		if (cfg_session_id != MGMTD_SESSION_ID_NONE
+		    && cfg_session_id != session->session_id) {
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"Configuration already in-progress through a different user session!",
+				setcfg_req->implicit_commit);
+			goto mgmt_fe_sess_handle_setcfg_req_failed;
+		}
+
+
+		/*
+		 * Try taking write-lock on the requested DS (if not already).
+		 */
+		if (!session->ds_write_locked[setcfg_req->ds_id]) {
+			if (mgmt_fe_session_write_lock_ds(setcfg_req->ds_id,
+							      ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_setcfg_reply(
+					session, setcfg_req->ds_id,
+					setcfg_req->req_id, false,
+					"Failed to lock the DS!",
+					setcfg_req->implicit_commit);
+				goto mgmt_fe_sess_handle_setcfg_req_failed;
+			}
+
+			session->ds_locked_implict[setcfg_req->ds_id] = true;
+		}
+
+		/*
+		 * Start a CONFIG Transaction (if not started already)
+		 */
+		session->cfg_txn_id = mgmt_create_txn(session->session_id,
+						      MGMTD_TXN_TYPE_CONFIG);
+		/*
+		 * Compare against MGMTD_TXN_ID_NONE (not the session-id
+		 * sentinel): mgmt_create_txn() returns a transaction id.
+		 */
+		if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"Failed to create a Configuration session!",
+				setcfg_req->implicit_commit);
+			goto mgmt_fe_sess_handle_setcfg_req_failed;
+		}
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Config Txn 0x%llx for session %p",
+			(unsigned long long)session->cfg_txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Config Txn 0x%llx for session %p already created",
+			(unsigned long long)session->cfg_txn_id, session);
+
+		if (setcfg_req->implicit_commit) {
+			/*
+			 * In this scenario need to skip cleanup of the txn,
+			 * so setting implicit commit to false.
+			 */
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"A Configuration transaction is already in progress!",
+				false);
+			return 0;
+		}
+	}
+
+	dst_ds_ctx = 0;
+	if (setcfg_req->implicit_commit) {
+		dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+						   setcfg_req->commit_ds_id);
+		if (!dst_ds_ctx) {
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false, "No such commit DS exists!",
+				setcfg_req->implicit_commit);
+			return 0;
+		}
+	}
+
+	/*
+	 * Create the SETConfig request under the transaction.
+	 */
+	if (mgmt_txn_send_set_config_req(
+		    session->cfg_txn_id, setcfg_req->req_id, setcfg_req->ds_id,
+		    ds_ctx, setcfg_req->data, setcfg_req->n_data,
+		    setcfg_req->implicit_commit, setcfg_req->commit_ds_id,
+		    dst_ds_ctx)
+	    != 0) {
+		mgmt_fe_send_setcfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"Request processing for SET-CONFIG failed!",
+			setcfg_req->implicit_commit);
+		goto mgmt_fe_sess_handle_setcfg_req_failed;
+	}
+
+	return 0;
+
+mgmt_fe_sess_handle_setcfg_req_failed:
+
+	/*
+	 * Delete transaction created recently.
+	 */
+	if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->cfg_txn_id);
+	if (ds_ctx && session->ds_write_locked[setcfg_req->ds_id])
+		mgmt_fe_session_unlock_ds(setcfg_req->ds_id, ds_ctx, session,
+					      true, false);
+
+	return 0;
+}
+
+/*
+ * Handle a GET_CONFIG_REQ from a frontend session.
+ *
+ * Reads are permitted on the Candidate and Running datastores only.
+ * On the first GET of a session the DS read-lock is taken (unless the
+ * session already holds a read or write lock) and a SHOW transaction is
+ * created; the request is then forwarded to the transaction module.
+ * Errors are reported to the client via GET_CONFIG_REPLY.
+ */
+static int
+mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeGetConfigReq *getcfg_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, getcfg_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_getcfg_reply(session, getcfg_req->ds_id,
+					      getcfg_req->req_id, false, NULL,
+					      "No such DS exists!");
+		return 0;
+	}
+
+	/*
+	 * GET_CONFIG is only supported on the Candidate and Running
+	 * datastores; reject anything else.
+	 */
+	if (getcfg_req->ds_id != MGMTD_DS_CANDIDATE
+	    && getcfg_req->ds_id != MGMTD_DS_RUNNING) {
+		mgmt_fe_send_getcfg_reply(
+			session, getcfg_req->ds_id, getcfg_req->req_id, false,
+			NULL,
+			"Get-Config on datastores other than Candidate or Running DS not permitted!");
+		return 0;
+	}
+
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Try taking read-lock on the requested DS (if not already
+		 * locked). If the DS has already been write-locked by a ongoing
+		 * CONFIG transaction we may allow reading the contents of the
+		 * same DS.
+		 */
+		if (!session->ds_read_locked[getcfg_req->ds_id]
+		    && !session->ds_write_locked[getcfg_req->ds_id]) {
+			if (mgmt_fe_session_read_lock_ds(getcfg_req->ds_id,
+							     ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_getcfg_reply(
+					session, getcfg_req->ds_id,
+					getcfg_req->req_id, false, NULL,
+					"Failed to lock the DS! Another session might have locked it!");
+				goto mgmt_fe_sess_handle_getcfg_req_failed;
+			}
+
+			session->ds_locked_implict[getcfg_req->ds_id] = true;
+		}
+
+		/*
+		 * Start a SHOW Transaction (if not started already)
+		 */
+		session->txn_id = mgmt_create_txn(session->session_id,
+						  MGMTD_TXN_TYPE_SHOW);
+		/*
+		 * Compare against MGMTD_TXN_ID_NONE (not the session-id
+		 * sentinel): mgmt_create_txn() returns a transaction id.
+		 */
+		if (session->txn_id == MGMTD_TXN_ID_NONE) {
+			mgmt_fe_send_getcfg_reply(
+				session, getcfg_req->ds_id, getcfg_req->req_id,
+				false, NULL,
+				"Failed to create a Show transaction!");
+			goto mgmt_fe_sess_handle_getcfg_req_failed;
+		}
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Show Txn 0x%llx for session %p",
+			(unsigned long long)session->txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Show Txn 0x%llx for session %p already created",
+			(unsigned long long)session->txn_id, session);
+	}
+
+	/*
+	 * Create a GETConfig request under the transaction.
+	 */
+	if (mgmt_txn_send_get_config_req(session->txn_id, getcfg_req->req_id,
+					 getcfg_req->ds_id, ds_ctx,
+					 getcfg_req->data, getcfg_req->n_data)
+	    != 0) {
+		mgmt_fe_send_getcfg_reply(
+			session, getcfg_req->ds_id, getcfg_req->req_id, false,
+			NULL, "Request processing for GET-CONFIG failed!");
+		goto mgmt_fe_sess_handle_getcfg_req_failed;
+	}
+
+	return 0;
+
+mgmt_fe_sess_handle_getcfg_req_failed:
+
+	/*
+	 * Destroy the transaction created recently.
+	 */
+	if (session->txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->txn_id);
+	if (ds_ctx && session->ds_read_locked[getcfg_req->ds_id])
+		mgmt_fe_session_unlock_ds(getcfg_req->ds_id, ds_ctx, session,
+					      false, true);
+
+	return -1;
+}
+
+/*
+ * Handle a GET_DATA_REQ from a frontend session.
+ *
+ * Mirrors the GET_CONFIG path: on the first GET of a session the DS
+ * read-lock is taken (unless a read/write lock is already held) and a
+ * SHOW transaction is created; the request is then forwarded to the
+ * transaction module.  Errors are reported via GET_DATA_REPLY.
+ */
+static int
+mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
+					   Mgmtd__FeGetDataReq *getdata_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					getdata_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_getdata_reply(session, getdata_req->ds_id,
+					       getdata_req->req_id, false, NULL,
+					       "No such DS exists!");
+		return 0;
+	}
+
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Try taking read-lock on the requested DS (if not already
+		 * locked). If the DS has already been write-locked by a ongoing
+		 * CONFIG transaction we may allow reading the contents of the
+		 * same DS.
+		 */
+		if (!session->ds_read_locked[getdata_req->ds_id]
+		    && !session->ds_write_locked[getdata_req->ds_id]) {
+			if (mgmt_fe_session_read_lock_ds(getdata_req->ds_id,
+							     ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_getdata_reply(
+					session, getdata_req->ds_id,
+					getdata_req->req_id, false, NULL,
+					"Failed to lock the DS! Another session might have locked it!");
+				goto mgmt_fe_sess_handle_getdata_req_failed;
+			}
+
+			session->ds_locked_implict[getdata_req->ds_id] = true;
+		}
+
+		/*
+		 * Start a SHOW Transaction (if not started already)
+		 */
+		session->txn_id = mgmt_create_txn(session->session_id,
+						  MGMTD_TXN_TYPE_SHOW);
+		/*
+		 * Compare against MGMTD_TXN_ID_NONE (not the session-id
+		 * sentinel): mgmt_create_txn() returns a transaction id.
+		 */
+		if (session->txn_id == MGMTD_TXN_ID_NONE) {
+			mgmt_fe_send_getdata_reply(
+				session, getdata_req->ds_id, getdata_req->req_id,
+				false, NULL,
+				"Failed to create a Show transaction!");
+			goto mgmt_fe_sess_handle_getdata_req_failed;
+		}
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Show Txn 0x%llx for session %p",
+			(unsigned long long)session->txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Show Txn 0x%llx for session %p already created",
+			(unsigned long long)session->txn_id, session);
+	}
+
+	/*
+	 * Create a GETData request under the transaction.
+	 */
+	if (mgmt_txn_send_get_data_req(session->txn_id, getdata_req->req_id,
+				       getdata_req->ds_id, ds_ctx,
+				       getdata_req->data, getdata_req->n_data)
+	    != 0) {
+		/* Fixed copy-paste: this is the GET-DATA (not GET-CONFIG) path. */
+		mgmt_fe_send_getdata_reply(
+			session, getdata_req->ds_id, getdata_req->req_id, false,
+			NULL, "Request processing for GET-DATA failed!");
+		goto mgmt_fe_sess_handle_getdata_req_failed;
+	}
+
+	return 0;
+
+mgmt_fe_sess_handle_getdata_req_failed:
+
+	/*
+	 * Destroy the transaction created recently.
+	 */
+	if (session->txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->txn_id);
+
+	if (ds_ctx && session->ds_read_locked[getdata_req->ds_id])
+		mgmt_fe_session_unlock_ds(getdata_req->ds_id, ds_ctx,
+					      session, false, true);
+
+	return -1;
+}
+
+/*
+ * Handle a COMMIT_CONFIG_REQ from a frontend session.
+ *
+ * Commits may only target the Running DS.  A CONFIG transaction is
+ * created if the session has none, the destination DS write-lock is
+ * taken if not already held, and the request is forwarded to the
+ * transaction module.  All errors are reported to the client via
+ * COMMIT_CONFIG_REPLY; the return value is always 0.
+ */
+static int mgmt_fe_session_handle_commit_config_req_msg(
+	struct mgmt_fe_session_ctx *session,
+	Mgmtd__FeCommitConfigReq *commcfg_req)
+{
+	struct mgmt_ds_ctx *src_ds_ctx, *dst_ds_ctx;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_start, NULL);
+	session->adapter->cmt_stats.commit_cnt++;
+	/*
+	 * Get the source DS handle.
+	 */
+	src_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					   commcfg_req->src_ds_id);
+	if (!src_ds_ctx) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"No such source DS exists!");
+		return 0;
+	}
+
+	/*
+	 * Get the destination DS handle.
+	 */
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					   commcfg_req->dst_ds_id);
+	if (!dst_ds_ctx) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"No such destination DS exists!");
+		return 0;
+	}
+
+	/*
+	 * Commits are only permitted into the Running DS; reject any
+	 * other destination.
+	 */
+	if (commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Set-Config on datastores other than Running DS not permitted!");
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Start a CONFIG Transaction (if not started already)
+		 */
+		session->cfg_txn_id = mgmt_create_txn(session->session_id,
+						      MGMTD_TXN_TYPE_CONFIG);
+		/*
+		 * Compare against MGMTD_TXN_ID_NONE (not the session-id
+		 * sentinel): mgmt_create_txn() returns a transaction id.
+		 */
+		if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+			mgmt_fe_send_commitcfg_reply(
+				session, commcfg_req->src_ds_id,
+				commcfg_req->dst_ds_id, commcfg_req->req_id,
+				MGMTD_INTERNAL_ERROR,
+				commcfg_req->validate_only,
+				"Failed to create a Configuration session!");
+			return 0;
+		}
+		MGMTD_FE_ADAPTER_DBG("Created txn %" PRIu64
+				     " for session %" PRIu64
+				     " for COMMIT-CFG-REQ",
+				     session->cfg_txn_id, session->session_id);
+	}
+
+
+	/*
+	 * Try taking write-lock on the destination DS (if not already).
+	 */
+	if (!session->ds_write_locked[commcfg_req->dst_ds_id]) {
+		if (mgmt_fe_session_write_lock_ds(commcfg_req->dst_ds_id,
+						      dst_ds_ctx, session)
+		    != 0) {
+			mgmt_fe_send_commitcfg_reply(
+				session, commcfg_req->src_ds_id,
+				commcfg_req->dst_ds_id, commcfg_req->req_id,
+				MGMTD_DS_LOCK_FAILED,
+				commcfg_req->validate_only,
+				"Failed to lock the destination DS!");
+			return 0;
+		}
+
+		session->ds_locked_implict[commcfg_req->dst_ds_id] = true;
+	}
+
+	/*
+	 * Create COMMITConfig request under the transaction
+	 */
+	if (mgmt_txn_send_commit_config_req(
+		    session->cfg_txn_id, commcfg_req->req_id,
+		    commcfg_req->src_ds_id, src_ds_ctx, commcfg_req->dst_ds_id,
+		    dst_ds_ctx, commcfg_req->validate_only, commcfg_req->abort,
+		    false)
+	    != 0) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Request processing for COMMIT-CONFIG failed!");
+		return 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Demultiplex a decoded frontend message and invoke the matching
+ * session/adapter handler.
+ *
+ * Fix: mgmt_session_id2ctx() returns NULL for a stale/unknown
+ * session-id, but the original code dereferenced the result
+ * unconditionally (e.g. the SETCFG stats increment) and passed NULL
+ * into handlers that dereference it.  Each session-based case now
+ * drops the message with a debug log when the lookup fails.
+ */
+static int
+mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
+			       Mgmtd__FeMessage *fe_msg)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	/*
+	 * protobuf-c adds a max size enum with an internal, and changing by
+	 * version, name; cast to an int to avoid unhandled enum warnings
+	 */
+	switch ((int)fe_msg->message_case) {
+	case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+		MGMTD_FE_ADAPTER_DBG("Got Register Req Msg from '%s'",
+				       fe_msg->register_req->client_name);
+
+		if (strlen(fe_msg->register_req->client_name)) {
+			strlcpy(adapter->name,
+				fe_msg->register_req->client_name,
+				sizeof(adapter->name));
+			mgmt_fe_adapter_cleanup_old_conn(adapter);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+		if (fe_msg->session_req->create
+		    && fe_msg->session_req->id_case
+			== MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got Session Create Req Msg for client-id %llu from '%s'",
+				(unsigned long long)
+					fe_msg->session_req->client_conn_id,
+				adapter->name);
+
+			session = mgmt_fe_create_session(
+				adapter, fe_msg->session_req->client_conn_id);
+			mgmt_fe_send_session_reply(adapter, session, true,
+						       session ? true : false);
+		} else if (
+			!fe_msg->session_req->create
+			&& fe_msg->session_req->id_case
+				== MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got Session Destroy Req Msg for session-id %llu from '%s'",
+				(unsigned long long)
+					fe_msg->session_req->session_id,
+				adapter->name);
+
+			session = mgmt_session_id2ctx(
+				fe_msg->session_req->session_id);
+			if (!session) {
+				MGMTD_FE_ADAPTER_DBG(
+					"Dropping Session Destroy Req for unknown session-id %llu from '%s'",
+					(unsigned long long)
+						fe_msg->session_req->session_id,
+					adapter->name);
+				break;
+			}
+			mgmt_fe_send_session_reply(adapter, session, false,
+						       true);
+			mgmt_fe_cleanup_session(&session);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+		session = mgmt_session_id2ctx(
+				fe_msg->lockds_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Dropping LockDS Req for unknown session-id %llu from '%s'",
+				(unsigned long long)fe_msg->lockds_req->session_id,
+				adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got %sockDS Req Msg for DS:%d for session-id %llx from '%s'",
+			fe_msg->lockds_req->lock ? "L" : "Unl",
+			fe_msg->lockds_req->ds_id,
+			(unsigned long long)fe_msg->lockds_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_lockds_req_msg(
+			session, fe_msg->lockds_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+		session = mgmt_session_id2ctx(
+				fe_msg->setcfg_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Dropping Set Config Req for unknown session-id %llu from '%s'",
+				(unsigned long long)fe_msg->setcfg_req->session_id,
+				adapter->name);
+			break;
+		}
+		session->adapter->setcfg_stats.set_cfg_count++;
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Set Config Req Msg (%d Xpaths, Implicit:%c) on DS:%d for session-id %llu from '%s'",
+			(int)fe_msg->setcfg_req->n_data,
+			fe_msg->setcfg_req->implicit_commit ? 'T':'F',
+			fe_msg->setcfg_req->ds_id,
+			(unsigned long long)fe_msg->setcfg_req->session_id,
+			adapter->name);
+
+		mgmt_fe_session_handle_setcfg_req_msg(
+			session, fe_msg->setcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+		session = mgmt_session_id2ctx(
+				fe_msg->commcfg_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Dropping Commit Config Req for unknown session-id %llu from '%s'",
+				(unsigned long long)fe_msg->commcfg_req->session_id,
+				adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Commit Config Req Msg for src-DS:%d dst-DS:%d (Abort:%c) on session-id %llu from '%s'",
+			fe_msg->commcfg_req->src_ds_id,
+			fe_msg->commcfg_req->dst_ds_id,
+			fe_msg->commcfg_req->abort ? 'T':'F',
+			(unsigned long long)fe_msg->commcfg_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_commit_config_req_msg(
+			session, fe_msg->commcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+		session = mgmt_session_id2ctx(
+				fe_msg->getcfg_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Dropping Get-Config Req for unknown session-id %llu from '%s'",
+				(unsigned long long)fe_msg->getcfg_req->session_id,
+				adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Get-Config Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+			fe_msg->getcfg_req->ds_id,
+			(int)fe_msg->getcfg_req->n_data,
+			(unsigned long long)fe_msg->getcfg_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_getcfg_req_msg(
+			session, fe_msg->getcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+		session = mgmt_session_id2ctx(
+				fe_msg->getdata_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Dropping Get-Data Req for unknown session-id %llu from '%s'",
+				(unsigned long long)fe_msg->getdata_req->session_id,
+				adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Get-Data Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+			fe_msg->getdata_req->ds_id,
+			(int)fe_msg->getdata_req->n_data,
+			(unsigned long long)fe_msg->getdata_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_getdata_req_msg(
+			session, fe_msg->getdata_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from MGMTD to
+	 * Frontend clients only and/or need not be handled on MGMTd.
+	 */
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Callback invoked by the message-stream layer for each complete message
+ * received from a frontend client: decode the protobuf envelope and
+ * dispatch it to mgmt_fe_adapter_handle_msg().
+ */
+static void mgmt_fe_adapter_process_msg(void *user_ctx, uint8_t *data,
+					size_t len)
+{
+	struct mgmt_fe_client_adapter *adapter = user_ctx;
+	Mgmtd__FeMessage *fe_msg;
+
+	fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+	if (!fe_msg) {
+		/* Undecodable message: drop it, nothing else we can do. */
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to decode %zu bytes for adapter: %s", len,
+			adapter->name);
+		return;
+	}
+	MGMTD_FE_ADAPTER_DBG(
+		"Decoded %zu bytes of message: %u from adapter: %s", len,
+		fe_msg->message_case, adapter->name);
+	(void)mgmt_fe_adapter_handle_msg(adapter, fe_msg);
+	mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
+/*
+ * Timer callback: drain buffered inbound messages for this adapter,
+ * re-arming itself while more work remains.
+ */
+static void mgmt_fe_adapter_proc_msgbufs(struct event *thread)
+{
+	struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
+	bool more_to_process;
+
+	more_to_process = mgmt_msg_procbufs(&adapter->mstate,
+					    mgmt_fe_adapter_process_msg,
+					    adapter, mgmt_debug_fe);
+	if (more_to_process)
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+}
+
+/*
+ * Read event handler: pull available bytes from the client connection
+ * into the message-stream buffers and reschedule as the stream layer
+ * directs.
+ */
+static void mgmt_fe_adapter_read(struct event *thread)
+{
+	struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
+	enum mgmt_msg_rsched rv;
+
+	rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd, mgmt_debug_fe);
+	if (rv == MSR_DISCONNECT) {
+		mgmt_fe_adapter_disconnect(adapter);
+		return;
+	}
+	/* Complete message(s) buffered: also schedule processing. */
+	if (rv == MSR_SCHED_BOTH)
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+	/* Always keep listening for more data. */
+	mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+}
+
+/*
+ * Write event handler: flush pending outbound messages; depending on
+ * the stream layer's verdict either reschedule the write, disconnect,
+ * or temporarily throttle writes.
+ */
+static void mgmt_fe_adapter_write(struct event *thread)
+{
+	struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
+	enum mgmt_msg_wsched rv;
+
+	rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd, mgmt_debug_fe);
+	if (rv == MSW_SCHED_STREAM)
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_WRITE);
+	else if (rv == MSW_DISCONNECT)
+		mgmt_fe_adapter_disconnect(adapter);
+	else if (rv == MSW_SCHED_WRITES_OFF) {
+		/* Back-pressure: pause writes, resume after a delay. */
+		mgmt_fe_adapter_writes_off(adapter);
+		mgmt_fe_adapter_register_event(adapter,
+						MGMTD_FE_CONN_WRITES_ON);
+	} else
+		assert(rv == MSW_SCHED_NONE);
+}
+
+/* Timer callback: the write-throttle delay expired, re-enable writes. */
+static void mgmt_fe_adapter_resume_writes(struct event *thread)
+{
+	struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
+
+	assert(adapter && adapter->conn_fd != -1);
+	mgmt_fe_adapter_writes_on(adapter);
+}
+
+/*
+ * Schedule the requested adapter event on the mgmtd event loop.
+ * MGMTD_FE_SERVER is owned by the server module and must never be
+ * posted here.
+ */
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+			       enum mgmt_fe_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_FE_CONN_READ:
+		event_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
+			adapter, adapter->conn_fd, &adapter->conn_read_ev);
+		break;
+	case MGMTD_FE_CONN_WRITE:
+		event_add_write(mgmt_fe_adapter_tm,
+				 mgmt_fe_adapter_write, adapter,
+				 adapter->conn_fd, &adapter->conn_write_ev);
+		break;
+	case MGMTD_FE_PROC_MSG:
+		/* Short delay so several messages can be batched. */
+		tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
+		event_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_adapter_proc_msgbufs, adapter,
+				    &tv, &adapter->proc_msg_ev);
+		break;
+	case MGMTD_FE_CONN_WRITES_ON:
+		event_add_timer_msec(mgmt_fe_adapter_tm,
+				      mgmt_fe_adapter_resume_writes, adapter,
+				      MGMTD_FE_MSG_WRITE_DELAY_MSEC,
+				      &adapter->conn_writes_on);
+		break;
+	case MGMTD_FE_SERVER:
+		assert(!"mgmt_fe_adapter_post_event() called incorrectly");
+		break;
+	}
+}
+
+/* Take a reference on the adapter, keeping it alive until unlocked. */
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+{
+	adapter->refcount++;
+}
+
+/*
+ * Drop a reference on the adapter.  On the last reference the adapter
+ * is removed from the global list, all pending events are cancelled,
+ * the message-stream state is released and the adapter freed.  The
+ * caller's pointer is always NULLed to prevent use-after-free.
+ *
+ * Note: the stray 'extern' that used to decorate this definition has
+ * been dropped; 'extern' belongs on the declaration in the header.
+ */
+void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
+{
+	assert(*adapter && (*adapter)->refcount);
+
+	(*adapter)->refcount--;
+	if (!(*adapter)->refcount) {
+		mgmt_fe_adapters_del(&mgmt_fe_adapters, *adapter);
+		EVENT_OFF((*adapter)->conn_read_ev);
+		EVENT_OFF((*adapter)->conn_write_ev);
+		EVENT_OFF((*adapter)->proc_msg_ev);
+		EVENT_OFF((*adapter)->conn_writes_on);
+		mgmt_msg_destroy(&(*adapter)->mstate);
+		XFREE(MTYPE_MGMTD_FE_ADPATER, *adapter);
+	}
+
+	*adapter = NULL;
+}
+
+/*
+ * One-time initialization of the frontend adapter module: record the
+ * event loop and master context, set up the adapter list and the
+ * session-id hash.  Subsequent calls are no-ops.  Always returns 0.
+ */
+int mgmt_fe_adapter_init(struct event_loop *tm, struct mgmt_master *mm)
+{
+	if (!mgmt_fe_adapter_tm) {
+		mgmt_fe_adapter_tm = tm;
+		mgmt_fe_adapter_mm = mm;
+		mgmt_fe_adapters_init(&mgmt_fe_adapters);
+
+		assert(!mgmt_fe_sessions);
+		mgmt_fe_sessions = hash_create(mgmt_fe_session_hash_key,
+						mgmt_fe_session_hash_cmp,
+						"MGMT Frontend Sessions");
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down the frontend adapter module: disconnect all adapters,
+ * then destroy the session hash.
+ */
+void mgmt_fe_adapter_destroy(void)
+{
+	mgmt_fe_cleanup_adapters();
+	mgmt_fe_session_hash_destroy();
+}
+
+/*
+ * Find or create the adapter for an accepted frontend connection.
+ * A new adapter starts with a placeholder name ("Unknown-FD-<fd>")
+ * until the client registers, holds one list reference, and is armed
+ * for reads immediately.  The socket is always (re-)made non-blocking
+ * with enlarged send/receive buffers.
+ */
+struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+{
+	struct mgmt_fe_client_adapter *adapter = NULL;
+
+	adapter = mgmt_fe_find_adapter_by_fd(conn_fd);
+	if (!adapter) {
+		/* XCALLOC aborts on allocation failure; the previous
+		 * assert(adapter) was redundant and has been removed.
+		 */
+		adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
+				sizeof(struct mgmt_fe_client_adapter));
+		adapter->conn_fd = conn_fd;
+		memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
+		snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+			 adapter->conn_fd);
+		mgmt_fe_sessions_init(&adapter->fe_sessions);
+
+		mgmt_msg_init(&adapter->mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
+			      MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
+			      "FE-adapter");
+		mgmt_fe_adapter_lock(adapter);
+
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+		mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+
+		/* Seed the minimum trackers so the first sample wins. */
+		adapter->setcfg_stats.min_tm = ULONG_MAX;
+		adapter->cmt_stats.min_tm = ULONG_MAX;
+		MGMTD_FE_ADAPTER_DBG("Added new MGMTD Frontend adapter '%s'",
+				      adapter->name);
+	}
+
+	/* Make client socket non-blocking. */
+	set_nonblocking(adapter->conn_fd);
+	setsockopt_so_sendbuf(adapter->conn_fd,
+			      MGMTD_SOCKET_FE_SEND_BUF_SIZE);
+	setsockopt_so_recvbuf(adapter->conn_fd,
+			      MGMTD_SOCKET_FE_RECV_BUF_SIZE);
+	return adapter;
+}
+
+/* Look up a frontend adapter by its registered client name. */
+struct mgmt_fe_client_adapter *mgmt_fe_get_adapter(const char *name)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	adapter = mgmt_fe_find_adapter_by_name(name);
+	return adapter;
+}
+
+/*
+ * Send a SETCFG_REPLY to the frontend session identified by session_id,
+ * after validating that the session still owns txn_id.
+ *
+ * Returns 0 on success, -1 if the session is gone or the transaction
+ * is stale.  (Typo fix: "doesnot" -> "does not" in the error log.)
+ */
+int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				Mgmtd__DatastoreId ds_id, uint64_t req_id,
+				enum mgmt_result result,
+				const char *error_if_any,
+				bool implicit_commit)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id) {
+		if (session)
+			MGMTD_FE_ADAPTER_ERR(
+				"Txn_id does not match, session txn is 0x%llx, current txn 0x%llx",
+				(unsigned long long)session->cfg_txn_id,
+				(unsigned long long)txn_id);
+		return -1;
+	}
+
+	return mgmt_fe_send_setcfg_reply(
+		session, ds_id, req_id, result == MGMTD_SUCCESS ? true : false,
+		error_if_any, implicit_commit);
+}
+
+/*
+ * Send a COMMCFG_REPLY to the frontend session identified by
+ * session_id.  Returns 0 on success, -1 if the session is gone or no
+ * longer owns txn_id.
+ */
+int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				   Mgmtd__DatastoreId src_ds_id,
+				   Mgmtd__DatastoreId dst_ds_id,
+				   uint64_t req_id, bool validate_only,
+				   enum mgmt_result result,
+				   const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_commitcfg_reply(session, src_ds_id, dst_ds_id,
+					     req_id, result, validate_only,
+					     error_if_any);
+}
+
+/*
+ * Send a GETCFG_REPLY to the frontend session identified by
+ * session_id.  Note: validates against the show-transaction id
+ * (session->txn_id), not the config-transaction id.  Returns 0 on
+ * success, -1 on stale session/transaction.
+ */
+int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				Mgmtd__DatastoreId ds_id, uint64_t req_id,
+				enum mgmt_result result,
+				Mgmtd__YangDataReply *data_resp,
+				const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_getcfg_reply(session, ds_id, req_id,
+					  result == MGMTD_SUCCESS, data_resp,
+					  error_if_any);
+}
+
+/*
+ * Send a GETDATA_REPLY to the frontend session identified by
+ * session_id, provided the session still owns txn_id.
+ */
+int mgmt_fe_send_get_data_reply(uint64_t session_id, uint64_t txn_id,
+				 Mgmtd__DatastoreId ds_id, uint64_t req_id,
+				 enum mgmt_result result,
+				 Mgmtd__YangDataReply *data_resp,
+				 const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session = mgmt_session_id2ctx(session_id);
+
+	if (!session || session->txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_getdata_reply(session, ds_id, req_id,
+					   result == MGMTD_SUCCESS,
+					   data_resp, error_if_any);
+}
+
+/*
+ * Placeholder for pushing asynchronous data notifications to frontend
+ * clients.  Not implemented yet: always returns 0 without doing
+ * anything.
+ */
+int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+			      Mgmtd__YangData * data_resp[], int num_data)
+{
+	/* struct mgmt_fe_session_ctx *session; */
+
+	return 0;
+}
+
+/* Return the set-config stats of the adapter owning this session,
+ * or NULL if the session (or its adapter) no longer exists. */
+struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session = mgmt_session_id2ctx(session_id);
+
+	if (session && session->adapter)
+		return &session->adapter->setcfg_stats;
+
+	return NULL;
+}
+
+/* Return the commit stats of the adapter owning this session, or
+ * NULL if the session (or its adapter) no longer exists. */
+struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || !session->adapter)
+		return NULL;
+
+	return &session->adapter->cmt_stats;
+}
+
+/*
+ * Dump the commit statistics of one adapter to the given VTY.  A
+ * no-op unless performance statistics collection is enabled.  The
+ * early return guarantees mm->perf_stats_en is true below, so the
+ * per-field re-checks that used to guard individual lines were dead
+ * conditions and have been removed (output is unchanged).
+ */
+static void
+mgmt_fe_adapter_cmt_stats_write(struct vty *vty,
+				struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[100] = {0};
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, " Num-Commits: \t\t\t%lu\n",
+		adapter->cmt_stats.commit_cnt);
+	if (adapter->cmt_stats.commit_cnt > 0) {
+		vty_out(vty, " Max-Commit-Duration: \t\t%lu uSecs\n",
+			adapter->cmt_stats.max_tm);
+		vty_out(vty, " Max-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.max_batch_cnt);
+		vty_out(vty, " Min-Commit-Duration: \t\t%lu uSecs\n",
+			adapter->cmt_stats.min_tm);
+		vty_out(vty, " Min-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.min_batch_cnt);
+		vty_out(vty, " Last-Commit-Duration: \t\t%lu uSecs\n",
+			adapter->cmt_stats.last_exec_tm);
+		vty_out(vty, " Last-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.last_batch_cnt);
+		vty_out(vty, " Last-Commit-CfgData-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_cfgdata_reqs);
+		vty_out(vty, " Last-Commit-CfgApply-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_apply_reqs);
+		vty_out(vty, " Last-Commit-Details:\n");
+		vty_out(vty, " Commit Start: \t\t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.last_start, buf,
+				sizeof(buf)));
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+		vty_out(vty, " Config-Validate Start: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.validate_start, buf,
+				sizeof(buf)));
+#endif
+		vty_out(vty, " Prep-Config Start: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.prep_cfg_start, buf,
+				sizeof(buf)));
+		vty_out(vty, " Txn-Create Start: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.txn_create_start,
+				buf, sizeof(buf)));
+		vty_out(vty,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+			" Send-Config Start: \t\t%s\n",
+#else
+			" Send-Config-Validate Start: \t%s\n",
+#endif
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.send_cfg_start, buf,
+				sizeof(buf)));
+		vty_out(vty, " Apply-Config Start: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.apply_cfg_start, buf,
+				sizeof(buf)));
+		vty_out(vty, " Apply-Config End: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.apply_cfg_end, buf,
+				sizeof(buf)));
+		vty_out(vty, " Txn-Delete Start: \t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.txn_del_start, buf,
+				sizeof(buf)));
+		vty_out(vty, " Commit End: \t\t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->cmt_stats.last_end, buf,
+				sizeof(buf)));
+	}
+}
+
+/*
+ * Dump the set-config statistics of one adapter to the given VTY.  A
+ * no-op unless performance statistics collection is enabled; the
+ * redundant 'mm->perf_stats_en &&' in the detail condition (always
+ * true after the guard) has been dropped.
+ */
+static void
+mgmt_fe_adapter_setcfg_stats_write(struct vty *vty,
+				    struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[100] = {0};
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, " Num-Set-Cfg: \t\t\t%lu\n",
+		adapter->setcfg_stats.set_cfg_count);
+	if (adapter->setcfg_stats.set_cfg_count > 0) {
+		vty_out(vty, " Max-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.max_tm);
+		vty_out(vty, " Min-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.min_tm);
+		vty_out(vty, " Avg-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.avg_tm);
+		vty_out(vty, " Last-Set-Cfg-Details:\n");
+		vty_out(vty, " Set-Cfg Start: \t\t\t%s\n",
+			mgmt_realtime_to_string(&adapter->setcfg_stats.last_start,
+						buf, sizeof(buf)));
+		vty_out(vty, " Set-Cfg End: \t\t\t%s\n",
+			mgmt_realtime_to_string(&adapter->setcfg_stats.last_end,
+						buf, sizeof(buf)));
+	}
+}
+
+/*
+ * Dump the state of all frontend adapters and their sessions to the
+ * VTY; with 'detail', per-adapter set-config and commit statistics
+ * are included.
+ *
+ * Bug fix: 'locked' is now reset for every session.  Previously it
+ * was set once and never cleared, so the "None" marker was suppressed
+ * for lock-free sessions that followed any session holding a lock.
+ */
+void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	struct mgmt_fe_session_ctx *session;
+	Mgmtd__DatastoreId ds_id;
+	bool locked;
+
+	vty_out(vty, "MGMTD Frontend Adpaters\n");
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		vty_out(vty, " Client: \t\t\t\t%s\n", adapter->name);
+		vty_out(vty, " Conn-FD: \t\t\t\t%d\n", adapter->conn_fd);
+		if (detail) {
+			mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
+			mgmt_fe_adapter_cmt_stats_write(vty, adapter);
+		}
+		vty_out(vty, " Sessions\n");
+		FOREACH_SESSION_IN_LIST (adapter, session) {
+			vty_out(vty, " Session: \t\t\t\t%p\n", session);
+			vty_out(vty, " Client-Id: \t\t\t%llu\n",
+				(unsigned long long)session->client_id);
+			vty_out(vty, " Session-Id: \t\t\t%llx\n",
+				(unsigned long long)session->session_id);
+			vty_out(vty, " DS-Locks:\n");
+			/* Reset per session so "None" is reported correctly. */
+			locked = false;
+			FOREACH_MGMTD_DS_ID (ds_id) {
+				if (session->ds_write_locked[ds_id]
+				    || session->ds_read_locked[ds_id]) {
+					locked = true;
+					vty_out(vty,
+						" %s\t\t\t%s, %s\n",
+						mgmt_ds_id2name(ds_id),
+						session->ds_write_locked[ds_id]
+							? "Write"
+							: "Read",
+						session->ds_locked_implict[ds_id]
+							? "Implicit"
+							: "Explicit");
+				}
+			}
+			if (!locked)
+				vty_out(vty, " None\n");
+		}
+		vty_out(vty, " Total-Sessions: \t\t\t%d\n",
+			(int)mgmt_fe_sessions_count(&adapter->fe_sessions));
+		vty_out(vty, " Msg-Recvd: \t\t\t\t%" PRIu64 "\n",
+			adapter->mstate.nrxm);
+		vty_out(vty, " Bytes-Recvd: \t\t\t%" PRIu64 "\n",
+			adapter->mstate.nrxb);
+		vty_out(vty, " Msg-Sent: \t\t\t\t%" PRIu64 "\n",
+			adapter->mstate.ntxm);
+		vty_out(vty, " Bytes-Sent: \t\t\t%" PRIu64 "\n",
+			adapter->mstate.ntxb);
+	}
+	vty_out(vty, " Total: %d\n",
+		(int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
+}
+
+/* Enable/disable performance statistics collection (vty is unused). */
+void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config)
+{
+	mm->perf_stats_en = config;
+}
+
+/*
+ * Reset all performance statistics on every frontend adapter.
+ *
+ * Bug fix: both setcfg_stats and cmt_stats live on the adapter, not
+ * on the session.  The old code cleared cmt_stats from inside the
+ * session loop, which (a) repeated the same memset per session and
+ * (b) skipped the commit stats entirely for adapters with no
+ * sessions.  Both are now cleared once per adapter.
+ */
+void mgmt_fe_adapter_reset_perf_stats(struct vty *vty)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		memset(&adapter->setcfg_stats, 0,
+		       sizeof(adapter->setcfg_stats));
+		memset(&adapter->cmt_stats, 0, sizeof(adapter->cmt_stats));
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_ADAPTER_H_
+#define _FRR_MGMTD_FE_ADAPTER_H_
+
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+
+struct mgmt_fe_client_adapter;
+struct mgmt_master;
+
+/*
+ * Timing and counters collected across commit operations for one
+ * frontend client; durations are in micro-seconds (per the "uSecs"
+ * labels used when these are printed to the VTY).
+ */
+struct mgmt_commit_stats {
+	struct timeval last_start;	/* wall-clock start of last commit */
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+	struct timeval validate_start;	/* start of local config validation */
+#endif
+	struct timeval prep_cfg_start;	/* start of prepare-config phase */
+	struct timeval txn_create_start; /* start of txn creation */
+	struct timeval send_cfg_start;	/* start of config send phase */
+	struct timeval apply_cfg_start;	/* start of config apply phase */
+	struct timeval apply_cfg_end;	/* end of config apply phase */
+	struct timeval txn_del_start;	/* start of txn deletion */
+	struct timeval last_end;	/* wall-clock end of last commit */
+	unsigned long last_exec_tm;	/* duration of last commit (uSecs) */
+	unsigned long max_tm;		/* longest commit seen (uSecs) */
+	unsigned long min_tm;		/* shortest commit seen (uSecs) */
+	unsigned long last_batch_cnt;	/* batches in last commit */
+	unsigned long last_num_cfgdata_reqs; /* cfg-data reqs in last commit */
+	unsigned long last_num_apply_reqs; /* apply reqs in last commit */
+	unsigned long max_batch_cnt;	/* largest batch count seen */
+	unsigned long min_batch_cnt;	/* smallest batch count seen */
+	unsigned long commit_cnt;	/* total number of commits */
+};
+
+/*
+ * Timing and counters collected across SETCFG operations for one
+ * frontend client; durations are in micro-seconds (per the "uSec"
+ * labels used when these are printed to the VTY).
+ */
+struct mgmt_setcfg_stats {
+	struct timeval last_start;	/* wall-clock start of last set-cfg */
+	struct timeval last_end;	/* wall-clock end of last set-cfg */
+	unsigned long last_exec_tm;	/* duration of last set-cfg (uSec) */
+	unsigned long max_tm;		/* longest set-cfg seen (uSec) */
+	unsigned long min_tm;		/* shortest set-cfg seen (uSec) */
+	unsigned long avg_tm;		/* running average duration (uSec) */
+	unsigned long set_cfg_count;	/* total number of set-cfg requests */
+};
+
+PREDECL_LIST(mgmt_fe_sessions);
+
+PREDECL_LIST(mgmt_fe_adapters);
+
+/* Per-connection state for one frontend client of mgmtd. */
+struct mgmt_fe_client_adapter {
+	int conn_fd;			/* accepted connection socket */
+	union sockunion conn_su;	/* peer address of the connection */
+	struct event *conn_read_ev;	/* read-ready event */
+	struct event *conn_write_ev;	/* write-ready event */
+	struct event *conn_writes_on;	/* timer to resume throttled writes */
+	struct event *proc_msg_ev;	/* timer to process buffered msgs */
+	uint32_t flags;			/* MGMTD_FE_ADAPTER_FLAGS_* */
+
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+	/* List of sessions created and being maintained for this client. */
+	struct mgmt_fe_sessions_head fe_sessions;
+
+	/* IO streams for read and write */
+	struct mgmt_msg_state mstate;
+
+	int refcount;			/* see mgmt_fe_adapter_lock/unlock */
+	struct mgmt_commit_stats cmt_stats;
+	struct mgmt_setcfg_stats setcfg_stats;
+
+	struct mgmt_fe_adapters_item list_linkage;
+};
+
+#define MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
+
+DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
+
+/* Initialise frontend adapter module */
+extern int mgmt_fe_adapter_init(struct event_loop *tm, struct mgmt_master *cm);
+
+/* Destroy frontend adapter module */
+extern void mgmt_fe_adapter_destroy(void);
+
+/* Acquire lock for frontend adapter */
+extern void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter);
+
+/* Remove lock from frontend adapter */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
+
+/* Create frontend adapter */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch frontend adapter given a name */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_get_adapter(const char *name);
+
+/*
+ * Send set-config reply to the frontend client.
+ *
+ * session
+ * Unique session identifier.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * req_id
+ * Config request ID.
+ *
+ * result
+ * Config request result (MGMT_*).
+ *
+ * error_if_any
+ * Buffer to store human-readable error message in case of error.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id,
+ enum mgmt_result result,
+ const char *error_if_any,
+ bool implcit_commit);
+
+/*
+ * Send commit-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_commit_cfg_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, bool validate_only,
+ enum mgmt_result result, const char *error_if_any);
+
+/*
+ * Send get-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any);
+
+/*
+ * Send get-data reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_data_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId ds_id,
+ uint64_t req_id, enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp, const char *error_if_any);
+
+/*
+ * Send data notify to the frontend client.
+ */
+extern int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData * data_resp[],
+ int num_data);
+
+/* Fetch frontend client session set-config stats */
+extern struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
+
+/* Fetch frontend client session commit stats */
+extern struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id);
+
+extern void mgmt_fe_adapter_status_write(struct vty *vty, bool detail);
+extern void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config);
+extern void mgmt_fe_adapter_reset_perf_stats(struct vty *vty);
+#endif /* _FRR_MGMTD_FE_ADAPTER_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_SRVR_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_SRVR_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_SRVR_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_fe) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_FE_SRVR_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_fe_listen_fd = -1;
+static struct event_loop *mgmt_fe_listen_tm;
+static struct event *mgmt_fe_listen_ev;
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event);
+
+/*
+ * Accept handler for the frontend server listen socket: accept one
+ * new client connection, make it non-blocking and close-on-exec, and
+ * hand it to the adapter layer.  The listen event is re-armed first
+ * so further connections keep being accepted regardless of outcome.
+ */
+static void mgmt_fe_conn_accept(struct event *thread)
+{
+	int client_conn_fd;
+	union sockunion su;
+
+	if (mgmt_fe_listen_fd < 0)
+		return;
+
+	/* We continue hearing server listen socket. */
+	mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+	memset(&su, 0, sizeof(union sockunion));
+
+	/* We can handle IPv4 or IPv6 socket. */
+	client_conn_fd = sockunion_accept(mgmt_fe_listen_fd, &su);
+	if (client_conn_fd < 0) {
+		MGMTD_FE_SRVR_ERR(
+			"Failed to accept MGMTD Frontend client connection : %s",
+			safe_strerror(errno));
+		return;
+	}
+	set_nonblocking(client_conn_fd);
+	set_cloexec(client_conn_fd);
+
+	MGMTD_FE_SRVR_DBG("Got a new MGMTD Frontend connection");
+
+	mgmt_fe_create_adapter(client_conn_fd, &su);
+}
+
+/* (Re-)arm the accept event on the frontend server listen socket;
+ * MGMTD_FE_SERVER is the only event this module handles. */
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
+{
+	if (event != MGMTD_FE_SERVER)
+		assert(!"mgmt_fe_server_post_event() called incorrectly");
+
+	event_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept, NULL,
+		       mgmt_fe_listen_fd, &mgmt_fe_listen_ev);
+	assert(mgmt_fe_listen_ev);
+}
+
+/*
+ * Create, bind and listen on the MGMTD frontend UNIX-domain server
+ * socket and register the accept handler.  Exits the daemon on any
+ * failure since mgmtd cannot operate without its frontend server.
+ * The hostname argument is currently unused.
+ *
+ * Fixes: 'addr' is now zeroed before use; the comma operator after
+ * the sun_family assignment is now a proper statement terminator;
+ * the failure path only closes the socket when one was actually
+ * created (the old 'if (sock)' would close(-1) on socket() failure
+ * and would skip closing a valid fd 0).
+ */
+static void mgmt_fe_server_start(const char *hostname)
+{
+	int ret;
+	int sock = -1;
+	struct sockaddr_un addr;
+	mode_t old_mask;
+
+	/* Set umask */
+	old_mask = umask(0077);
+
+	sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+	if (sock < 0) {
+		MGMTD_FE_SRVR_ERR("Failed to create server socket: %s",
+				   safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sun_family = AF_UNIX;
+	strlcpy(addr.sun_path, MGMTD_FE_SERVER_PATH, sizeof(addr.sun_path));
+	unlink(addr.sun_path);
+	ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0) {
+		MGMTD_FE_SRVR_ERR(
+			"Failed to bind server socket to '%s'. Err: %s",
+			addr.sun_path, safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	ret = listen(sock, MGMTD_FE_MAX_CONN);
+	if (ret < 0) {
+		MGMTD_FE_SRVR_ERR("Failed to listen on server socket: %s",
+				   safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	/* Restore umask */
+	umask(old_mask);
+
+	mgmt_fe_listen_fd = sock;
+	mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+	MGMTD_FE_SRVR_DBG("Started MGMTD Frontend Server!");
+	return;
+
+mgmt_fe_server_start_failed:
+	if (sock >= 0)
+		close(sock);
+
+	mgmt_fe_listen_fd = -1;
+	exit(-1);
+}
+
+/*
+ * Initialize the frontend server on the given event loop and start
+ * listening.  Idempotent: returns 0 immediately if already running.
+ */
+int mgmt_fe_server_init(struct event_loop *master)
+{
+	if (mgmt_fe_listen_tm) {
+		MGMTD_FE_SRVR_DBG("MGMTD Frontend Server already running!");
+		return 0;
+	}
+
+	mgmt_fe_listen_tm = master;
+
+	mgmt_fe_server_start("localhost");
+
+	return 0;
+}
+
+/*
+ * Tear down the frontend server: cancel the accept event, close the
+ * listen socket and forget the event loop.  Safe to call when the
+ * server was never started.
+ */
+void mgmt_fe_server_destroy(void)
+{
+	if (mgmt_fe_listen_tm) {
+		MGMTD_FE_SRVR_DBG("Closing MGMTD Frontend Server!");
+
+		if (mgmt_fe_listen_ev) {
+			EVENT_OFF(mgmt_fe_listen_ev);
+			mgmt_fe_listen_ev = NULL;
+		}
+
+		if (mgmt_fe_listen_fd >= 0) {
+			close(mgmt_fe_listen_fd);
+			mgmt_fe_listen_fd = -1;
+		}
+
+		mgmt_fe_listen_tm = NULL;
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_SERVER_H_
+#define _FRR_MGMTD_FE_SERVER_H_
+
+#define MGMTD_FE_MAX_CONN 32
+
+/* Initialise frontend server */
+extern int mgmt_fe_server_init(struct event_loop *master);
+
+/* Destroy frontend server */
+extern void mgmt_fe_server_destroy(void);
+
+#endif /* _FRR_MGMTD_FE_SERVER_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "frrevent.h"
+#include "xref.h"
+
+#include "mgmt_fe_client.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+/* In-memory record of one configuration commit kept in history. */
+struct mgmt_cmt_info_t {
+	struct mgmt_cmt_infos_item cmts; /* linkage on mm->cmts list */
+
+	char cmtid_str[MGMTD_MD5_HASH_STR_HEX_LEN]; /* hex MD5 commit-id */
+	char time_str[MGMTD_COMMIT_TIME_STR_LEN];   /* commit timestamp */
+	char cmt_json_file[PATH_MAX]; /* path of the saved JSON config */
+};
+
+
+DECLARE_DLIST(mgmt_cmt_infos, struct mgmt_cmt_info_t, cmts);
+
+#define FOREACH_CMT_REC(mm, cmt_info) \
+ frr_each_safe (mgmt_cmt_infos, &mm->cmts, cmt_info)
+
+/*
+ * The only instance of VTY session that has triggered an ongoing
+ * config rollback operation.
+ */
+static struct vty *rollback_vty = NULL;
+
+/* Return true if the commit record file at file_path exists. */
+static bool mgmt_history_record_exists(char *file_path)
+{
+	/* access(2) returns 0 when the file exists (F_OK). */
+	return access(file_path, F_OK) == 0;
+}
+
+/* Delete an on-disk commit record file, logging the outcome. */
+static void mgmt_history_remove_file(char *name)
+{
+	if (remove(name) != 0) {
+		zlog_err("Old commit info deletion failed");
+		return;
+	}
+
+	zlog_debug("Old commit info deletion succeeded");
+}
+
+/*
+ * Compute the MD5 digest of input_str and render it as a lowercase
+ * hex string into hash, which must hold at least
+ * MGMTD_MD5_HASH_STR_HEX_LEN bytes.
+ */
+static void mgmt_history_hash(const char *input_str, char *hash)
+{
+	int i;
+	unsigned char digest[MGMTD_MD5_HASH_LEN];
+	MD5_CTX ctx;
+
+	memset(&ctx, 0, sizeof(ctx));
+	MD5Init(&ctx);
+	MD5Update(&ctx, input_str, strlen(input_str));
+	MD5Final(digest, &ctx);
+
+	for (i = 0; i < MGMTD_MD5_HASH_LEN; i++)
+		/* NOTE(review): the size argument ought to be the remaining
+		 * space (not the full buffer length); this works only because
+		 * each write is 3 bytes and the buffer is presumed to be
+		 * 2 * MGMTD_MD5_HASH_LEN + 1 bytes - confirm. */
+		snprintf(&hash[i * 2], MGMTD_MD5_HASH_STR_HEX_LEN, "%02x",
+			 (unsigned int)digest[i]);
+}
+
+/*
+ * Allocate a new commit record stamped with the current time; the
+ * commit-id is the MD5 hash of the timestamp string.  If the history
+ * already holds MGMTD_MAX_COMMIT_LIST entries, the oldest record (the
+ * tail of the list) and its on-disk file are removed first.  The new
+ * record is placed at the head (most-recent-first order).
+ */
+static struct mgmt_cmt_info_t *mgmt_history_create_cmt_rec(void)
+{
+	struct mgmt_cmt_info_t *new;
+	struct mgmt_cmt_info_t *cmt_info;
+	struct mgmt_cmt_info_t *last_cmt_info = NULL;
+	struct timeval cmt_recd_tv;
+
+	new = XCALLOC(MTYPE_MGMTD_CMT_INFO, sizeof(struct mgmt_cmt_info_t));
+	gettimeofday(&cmt_recd_tv, NULL);
+	mgmt_realtime_to_string(&cmt_recd_tv, new->time_str,
+				sizeof(new->time_str));
+	mgmt_history_hash(new->time_str, new->cmtid_str);
+	snprintf(new->cmt_json_file, sizeof(new->cmt_json_file),
+		 MGMTD_COMMIT_FILE_PATH, new->cmtid_str);
+
+	if (mgmt_cmt_infos_count(&mm->cmts) == MGMTD_MAX_COMMIT_LIST) {
+		/* Walk to the tail: the oldest record in the list. */
+		FOREACH_CMT_REC (mm, cmt_info)
+			last_cmt_info = cmt_info;
+
+		if (last_cmt_info) {
+			mgmt_history_remove_file(last_cmt_info->cmt_json_file);
+			mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
+			XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
+		}
+	}
+
+	mgmt_cmt_infos_add_head(&mm->cmts, new);
+	return new;
+}
+
+/* Find the in-memory commit record matching the given commit-id. */
+static struct mgmt_cmt_info_t *mgmt_history_find_cmt_record(const char *cmtid_str)
+{
+	struct mgmt_cmt_info_t *cmt;
+
+	FOREACH_CMT_REC (mm, cmt) {
+		if (!strncmp(cmt->cmtid_str, cmtid_str,
+			     MGMTD_MD5_HASH_STR_HEX_LEN))
+			return cmt;
+	}
+
+	return NULL;
+}
+
+/*
+ * Load the commit history from the on-disk index file into mm->cmts.
+ * Records whose JSON commit file has disappeared are skipped; finding
+ * more than MGMTD_MAX_COMMIT_LIST records is treated as corruption.
+ * Returns false if the index cannot be opened or is over-long.
+ */
+static bool mgmt_history_read_cmt_record_index(void)
+{
+	FILE *fp;
+	struct mgmt_cmt_info_t cmt_info;
+	struct mgmt_cmt_info_t *new;
+	int cnt = 0;
+
+	fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "rb");
+	if (!fp) {
+		zlog_err("Failed to open file %s rb mode",
+			 MGMTD_COMMIT_INDEX_FILE_NAME);
+		return false;
+	}
+
+	while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
+		if (cnt < MGMTD_MAX_COMMIT_LIST) {
+			if (!mgmt_history_record_exists(cmt_info.cmt_json_file)) {
+				zlog_err(
+					"Commit record present in index_file, but commit file %s missing",
+					cmt_info.cmt_json_file);
+				continue;
+			}
+
+			/* Copy the whole on-disk record; the stale list
+			 * linkage inside it is overwritten by add_tail. */
+			new = XCALLOC(MTYPE_MGMTD_CMT_INFO,
+				      sizeof(struct mgmt_cmt_info_t));
+			memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
+			mgmt_cmt_infos_add_tail(&mm->cmts, new);
+		} else {
+			zlog_err("More records found in index file %s",
+				 MGMTD_COMMIT_INDEX_FILE_NAME);
+			fclose(fp);
+			return false;
+		}
+
+		cnt++;
+	}
+
+	fclose(fp);
+	return true;
+}
+
+/*
+ * Rewrite the on-disk commit index from the in-memory list.  Returns
+ * false on open/write failure or when there is nothing to write.
+ *
+ * Fix: the staging array was hard-coded to 10 entries while the list
+ * is capped at MGMTD_MAX_COMMIT_LIST; it is now sized by the macro
+ * and the copy is bounded, so a larger cap can never overflow the
+ * stack buffer.
+ */
+static bool mgmt_history_dump_cmt_record_index(void)
+{
+	FILE *fp;
+	int ret = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+	struct mgmt_cmt_info_t cmt_info_set[MGMTD_MAX_COMMIT_LIST];
+	int cnt = 0;
+
+	mgmt_history_remove_file((char *)MGMTD_COMMIT_INDEX_FILE_NAME);
+	fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "ab");
+	if (!fp) {
+		zlog_err("Failed to open file %s ab mode",
+			 MGMTD_COMMIT_INDEX_FILE_NAME);
+		return false;
+	}
+
+	FOREACH_CMT_REC (mm, cmt_info) {
+		if (cnt >= (int)array_size(cmt_info_set))
+			break;
+		memcpy(&cmt_info_set[cnt], cmt_info,
+		       sizeof(struct mgmt_cmt_info_t));
+		cnt++;
+	}
+
+	if (!cnt) {
+		fclose(fp);
+		return false;
+	}
+
+	ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
+	fclose(fp);
+	if (ret != cnt) {
+		zlog_err("Write record failed");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Start a rollback to the given commit record: load the commit's JSON
+ * file into the candidate DS (unless skip_file_load), write-lock the
+ * running DS and trigger an internal commit-apply transaction.  The
+ * invoking VTY is blocked until mgmt_history_rollback_complete() is
+ * called; only one rollback may be in progress at a time.
+ *
+ * Returns 0 on success, -1 or a library error code on failure.
+ */
+static int mgmt_history_rollback_to_cmt(struct vty *vty,
+					struct mgmt_cmt_info_t *cmt_info,
+					bool skip_file_load)
+{
+	struct mgmt_ds_ctx *src_ds_ctx;
+	struct mgmt_ds_ctx *dst_ds_ctx;
+	int ret = 0;
+
+	if (rollback_vty) {
+		vty_out(vty, "ERROR: Rollback already in progress!\n");
+		return -1;
+	}
+
+	src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+	if (!src_ds_ctx) {
+		vty_out(vty, "ERROR: Couldnot access Candidate datastore!\n");
+		return -1;
+	}
+
+	/*
+	 * Note: Write lock on src_ds is not required. This is already
+	 * taken in 'conf te'.
+	 */
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
+	if (!dst_ds_ctx) {
+		vty_out(vty, "ERROR: Couldnot access Running datastore!\n");
+		return -1;
+	}
+
+	ret = mgmt_ds_write_lock(dst_ds_ctx);
+	if (ret != 0) {
+		vty_out(vty,
+			"Failed to lock the DS %u for rollback Reason: %s!\n",
+			MGMTD_DS_RUNNING, strerror(ret));
+		return -1;
+	}
+
+	if (!skip_file_load) {
+		ret = mgmt_ds_load_config_from_file(
+			src_ds_ctx, cmt_info->cmt_json_file, false);
+		if (ret != 0) {
+			/* Undo the running-DS lock on every error path. */
+			mgmt_ds_unlock(dst_ds_ctx);
+			vty_out(vty,
+				"Error with parsing the file with error code %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	/* Internally trigger a commit-request. */
+	ret = mgmt_txn_rollback_trigger_cfg_apply(src_ds_ctx, dst_ds_ctx);
+	if (ret != 0) {
+		mgmt_ds_unlock(dst_ds_ctx);
+		vty_out(vty,
+			"Error with creating commit apply txn with error code %d\n",
+			ret);
+		return ret;
+	}
+
+	mgmt_history_dump_cmt_record_index();
+
+	/*
+	 * Block the rollback command from returning till the rollback
+	 * is completed. On rollback completion mgmt_history_rollback_complete()
+	 * shall be called to resume the rollback command return to VTYSH.
+	 */
+	vty->mgmt_req_pending = true;
+	rollback_vty = vty;
+	return 0;
+}
+
+/*
+ * Called when an ongoing rollback finishes; resumes the VTY session
+ * that triggered it and clears the in-progress marker.
+ */
+void mgmt_history_rollback_complete(bool success)
+{
+	/* Robustness: ignore a spurious completion when no rollback is
+	 * pending rather than handing a NULL vty to the resume path. */
+	if (!rollback_vty)
+		return;
+
+	vty_mgmt_resume_response(rollback_vty, success);
+	rollback_vty = NULL;
+}
+
+/*
+ * Roll back to the commit identified by cmtid_str.  Walks the history
+ * from most-recent backwards, deleting every newer commit record (and
+ * its on-disk file) until the target is reached, then triggers the
+ * rollback.  Returns -1 for an unknown commit-id; otherwise the
+ * result of mgmt_history_rollback_to_cmt().
+ */
+int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str)
+{
+	int ret = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+
+	if (!mgmt_cmt_infos_count(&mm->cmts) ||
+		!mgmt_history_find_cmt_record(cmtid_str)) {
+		vty_out(vty, "Invalid commit Id\n");
+		return -1;
+	}
+
+	/* FOREACH_CMT_REC is the _safe variant, so deleting the current
+	 * record while iterating is legal. */
+	FOREACH_CMT_REC (mm, cmt_info) {
+		if (strncmp(cmt_info->cmtid_str, cmtid_str,
+			    MGMTD_MD5_HASH_STR_HEX_LEN) == 0) {
+			ret = mgmt_history_rollback_to_cmt(vty, cmt_info, false);
+			return ret;
+		}
+
+		/* A newer commit than the target: discard it. */
+		mgmt_history_remove_file(cmt_info->cmt_json_file);
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	return 0;
+}
+
+/*
+ * Roll back the last num_cmts commits (default 1 when 0 is passed).
+ * Deletes the newest num_cmts records and rolls back to the next
+ * older commit.  Rolling back the very last remaining commit is not
+ * supported.  Returns 0/-1 or the rollback trigger's error code.
+ */
+int mgmt_history_rollback_n(struct vty *vty, int num_cmts)
+{
+	int ret = 0;
+	int cnt = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+	size_t cmts;
+
+	if (!num_cmts)
+		num_cmts = 1;
+
+	cmts = mgmt_cmt_infos_count(&mm->cmts);
+	if ((int)cmts < num_cmts) {
+		vty_out(vty,
+			"Number of commits found (%d) less than required to rollback\n",
+			(int)cmts);
+		return -1;
+	}
+
+	if ((int)cmts == 1 || (int)cmts == num_cmts) {
+		vty_out(vty,
+			"Number of commits found (%d), Rollback of last commit is not supported\n",
+			(int)cmts);
+		return -1;
+	}
+
+	/* Safe iteration: the first num_cmts (newest) records are freed. */
+	FOREACH_CMT_REC (mm, cmt_info) {
+		if (cnt == num_cmts) {
+			ret = mgmt_history_rollback_to_cmt(vty, cmt_info, false);
+			return ret;
+		}
+
+		cnt++;
+		mgmt_history_remove_file(cmt_info->cmt_json_file);
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	if (!mgmt_cmt_infos_count(&mm->cmts)) {
+		/* NOTE(review): cmt_info is NULL after the loop; this is safe
+		 * only because skip_file_load=true means it is never
+		 * dereferenced - confirm. */
+		mgmt_ds_reset_candidate();
+		ret = mgmt_history_rollback_to_cmt(vty, cmt_info, true);
+	}
+
+	return ret;
+}
+
+/* Display the stored commit history (most recent first) on the VTY. */
+void show_mgmt_cmt_history(struct vty *vty)
+{
+	struct mgmt_cmt_info_t *cmt;
+	int slno = 0;
+
+	vty_out(vty, "Last 10 commit history:\n");
+	vty_out(vty, " Sl.No\tCommit-ID(HEX)\t\t\t Commit-Record-Time\n");
+	FOREACH_CMT_REC (mm, cmt) {
+		vty_out(vty, " %d\t%s %s\n", slno, cmt->cmtid_str,
+			cmt->time_str);
+		slno++;
+	}
+}
+
+/*
+ * Record a freshly applied commit: create a new history record, dump
+ * the datastore contents into the record's JSON file, and persist the
+ * updated history index.
+ */
+void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx)
+{
+	struct mgmt_cmt_info_t *cmt_info = mgmt_history_create_cmt_rec();
+	mgmt_ds_dump_ds_to_file(cmt_info->cmt_json_file, ds_ctx);
+	mgmt_history_dump_cmt_record_index();
+}
+
+/*
+ * Initialize the commit history module: set up the in-memory record
+ * list and reload records previously persisted to disk.
+ */
+void mgmt_history_init(void)
+{
+	/* Create commit record for previously stored commit-apply */
+	mgmt_cmt_infos_init(&mm->cmts);
+	mgmt_history_read_cmt_record_index();
+}
+
+/*
+ * Free all in-memory commit history records at shutdown. The on-disk
+ * commit files are not removed here, so history can be reloaded by
+ * mgmt_history_init() after a restart.
+ */
+void mgmt_history_destroy(void)
+{
+	struct mgmt_cmt_info_t *cmt_info;
+
+	FOREACH_CMT_REC(mm, cmt_info) {
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	mgmt_cmt_infos_fini(&mm->cmts);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#ifndef _FRR_MGMTD_HISTORY_H_
+#define _FRR_MGMTD_HISTORY_H_
+
+#include "vrf.h"
+
+PREDECL_DLIST(mgmt_cmt_infos);
+
+struct mgmt_ds_ctx;
+
+/*
+ * Rollback specific commit from commit history.
+ *
+ * vty
+ *    VTY context.
+ *
+ * cmtid_str
+ *    Specific commit id from commit history.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str);
+
+/*
+ * Rollback n commits from commit history.
+ *
+ * vty
+ *    VTY context.
+ *
+ * num_cmts
+ *    Number of commits to be rolled back.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_n(struct vty *vty, int num_cmts);
+
+/*
+ * Called when a previously initiated rollback has completed; resumes
+ * the VTY session that is waiting on the rollback result.
+ */
+extern void mgmt_history_rollback_complete(bool success);
+
+/*
+ * Show mgmt commit history.
+ */
+extern void show_mgmt_cmt_history(struct vty *vty);
+
+/* Record a freshly applied commit of the given datastore into history. */
+extern void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx);
+
+/* History module teardown / startup initialization. */
+extern void mgmt_history_destroy(void);
+extern void mgmt_history_init(void);
+
+#endif /* _FRR_MGMTD_HISTORY_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Main routine of mgmt.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "lib/version.h"
+#include "routemap.h"
+#include "filter.h"
+#include "libfrr.h"
+#include "frr_pthread.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "routing_nb.h"
+
+
+/* mgmt options, we use GNU getopt library. */
+static const struct option longopts[] = {
+	/*
+	 * NOTE(review): 'S' and 'Z' have no matching case in main()'s
+	 * option switch and are not in the short-option string passed to
+	 * frr_opt_add() — confirm whether libfrr consumes them or these
+	 * are dead entries.
+	 */
+	{"skip_runas", no_argument, NULL, 'S'},
+	{"no_zebra", no_argument, NULL, 'Z'},
+	{"socket_size", required_argument, NULL, 's'},
+	{0}
+};
+
+static void mgmt_exit(int);
+static void mgmt_vrf_terminate(void);
+
+/* privileges */
+static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_NET_RAW,
+ ZCAP_NET_ADMIN, ZCAP_SYS_ADMIN};
+
+struct zebra_privs_t mgmt_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0,
+};
+
+static struct frr_daemon_info mgmtd_di;
+char backup_config_file[256];
+
+/*
+ * SIGHUP handler. Config reload on SIGHUP is currently disabled (see
+ * the explanation below the early return); the signal is only logged.
+ */
+static void sighup(void)
+{
+	zlog_info("SIGHUP received, ignoring");
+
+	/* Deliberate early return — the code below is intentionally dead */
+	return;
+
+	/*
+	 * This is turned off for the moment. There is all
+	 * sorts of config turned off by mgmt_terminate
+	 * that is not setup properly again in mgmt_reset.
+	 * I see no easy way to do this nor do I see that
+	 * this is a desirable way to reload config
+	 * given the yang work.
+	 */
+	/* Terminate all thread. */
+	mgmt_terminate();
+
+	/*
+	 * mgmt_reset();
+	 */
+	zlog_info("MGMTD restarting!");
+
+	/*
+	 * Reload config file.
+	 * vty_read_config(NULL, mgmtd_di.config_file, config_default);
+	 */
+	/* Try to return to normal operation. */
+}
+
+/* SIGINT/SIGTERM handler: orderly shutdown; never returns. */
+static __attribute__((__noreturn__)) void sigint(void)
+{
+	zlog_notice("Terminating on signal");
+	/* A second terminating signal while already shutting down is a bug */
+	assert(mm->terminating == false);
+	mm->terminating = true; /* global flag that shutting down */
+
+	mgmt_terminate();
+
+	mgmt_exit(0);
+
+	/* Not reached — mgmt_exit() is noreturn */
+	exit(0);
+}
+
+/* SIGUSR1 handler: rotate the log file. */
+static void sigusr1(void)
+{
+	zlog_rotate();
+}
+
+/*
+ * Try to free up allocations we know about so that diagnostic tools such as
+ * valgrind are able to better illuminate leaks.
+ *
+ * Zebra route removal and protocol teardown are not meant to be done here.
+ * For example, "retain_mode" may be set.
+ *
+ * status: must be 0 — this path is only for clean exits (asserted below).
+ */
+static __attribute__((__noreturn__)) void mgmt_exit(int status)
+{
+	/* it only makes sense for this to be called on a clean exit */
+	assert(status == 0);
+
+	frr_early_fini();
+
+	/* stop pthreads (if any) */
+	frr_pthread_stop_all();
+
+	mgmt_vrf_terminate();
+
+	frr_fini();
+	exit(status);
+}
+
+/* Signal dispatch table registered with libfrr via FRR_DAEMON_INFO */
+static struct frr_signal_t mgmt_signals[] = {
+	{
+		.signal = SIGHUP,
+		.handler = &sighup,
+	},
+	{
+		.signal = SIGUSR1,
+		.handler = &sigusr1,
+	},
+	{
+		.signal = SIGINT,
+		.handler = &sigint,
+	},
+	{
+		/* SIGTERM shares the SIGINT clean-shutdown path */
+		.signal = SIGTERM,
+		.handler = &sigint,
+	},
+};
+
+/* VRF creation callback — mgmtd only logs VRF lifecycle events */
+static int mgmt_vrf_new(struct vrf *vrf)
+{
+	zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF deletion callback — log only */
+static int mgmt_vrf_delete(struct vrf *vrf)
+{
+	zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF enable callback — log only */
+static int mgmt_vrf_enable(struct vrf *vrf)
+{
+	zlog_debug("VRF Enable: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF disable callback — log only */
+static int mgmt_vrf_disable(struct vrf *vrf)
+{
+	zlog_debug("VRF Disable: %s(%u)", vrf->name, vrf->vrf_id);
+
+	/* Note: This is a callback, the VRF will be deleted by the caller. */
+	return 0;
+}
+
+/* mgmtd emits no VRF-specific running config of its own */
+static int mgmt_vrf_config_write(struct vty *vty)
+{
+	return 0;
+}
+
+/* Register the VRF lifecycle callbacks and VRF CLI commands */
+static void mgmt_vrf_init(void)
+{
+	vrf_init(mgmt_vrf_new, mgmt_vrf_enable, mgmt_vrf_disable,
+		 mgmt_vrf_delete);
+	vrf_cmd_init(mgmt_vrf_config_write);
+}
+
+/* Tear down VRF infrastructure on shutdown (see mgmt_exit()) */
+static void mgmt_vrf_terminate(void)
+{
+	vrf_terminate();
+}
+
+/*
+ * List of YANG modules to be loaded in the process context of
+ * MGMTd.
+ *
+ * NOTE: In future this will also include the YANG modules of
+ * all individual Backend clients.
+ */
+static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
+	&frr_filter_info, &frr_interface_info, &frr_route_map_info,
+	&frr_routing_info, &frr_vrf_info,
+/*
+ * YANG module info supported by backend clients get added here.
+ * NOTE: Always set .ignore_cbs true for to avoid validating
+ * backend northbound callbacks during loading.
+ */
+#ifdef HAVE_STATICD
+	&(struct frr_yang_module_info){.name = "frr-staticd",
+				       .ignore_cbs = true},
+#endif
+};
+
+/* Daemon descriptor consumed by libfrr (frr_preinit()/frr_init()) */
+FRR_DAEMON_INFO(mgmtd, MGMTD, .vty_port = MGMTD_VTY_PORT,
+
+		.proghelp = "FRR Management Daemon.",
+
+		.signals = mgmt_signals, .n_signals = array_size(mgmt_signals),
+
+		.privs = &mgmt_privs, .yang_modules = mgmt_yang_modules,
+		.n_yang_modules = array_size(mgmt_yang_modules),
+);
+
+/* Short-option letters that used to exist; kept for a helpful error */
+#define DEPRECATED_OPTIONS ""
+
+/* Main routine of mgmt. Treatment of argument and start mgmt finite
+ * state machine is handled at here.
+ */
+int main(int argc, char **argv)
+{
+	int opt;
+	int buffer_size = MGMTD_SOCKET_BUF_SIZE;
+
+	frr_preinit(&mgmtd_di, argc, argv);
+	frr_opt_add(
+		"s:" DEPRECATED_OPTIONS, longopts,
+		" -s, --socket_size Set MGMTD peer socket send buffer size\n");
+
+	/* Command line argument treatment. */
+	while (1) {
+		opt = frr_getopt(argc, argv, 0);
+
+		if (opt && opt < 128 && strchr(DEPRECATED_OPTIONS, opt)) {
+			fprintf(stderr,
+				"The -%c option no longer exists.\nPlease refer to the manual.\n",
+				opt);
+			continue;
+		}
+
+		if (opt == EOF)
+			break;
+
+		switch (opt) {
+		case 0:
+			break;
+		case 's':
+			/* NOTE(review): atoi() silently yields 0 on bad
+			 * input — consider strtol() with validation.
+			 */
+			buffer_size = atoi(optarg);
+			break;
+		default:
+			frr_help_exit(1);
+			break;
+		}
+	}
+
+	/* MGMTD master init. */
+	mgmt_master_init(frr_init(), buffer_size);
+
+	/* VRF Initializations. */
+	mgmt_vrf_init();
+
+	/* MGMTD related initialization. */
+	mgmt_init();
+
+	/*
+	 * NOTE(review): the backup config deliberately(?) points at
+	 * zebra.conf rather than an mgmtd-specific file — confirm.
+	 */
+	snprintf(backup_config_file, sizeof(backup_config_file),
+		 "%s/zebra.conf", frr_sysconfdir);
+	mgmtd_di.backup_config_file = backup_config_file;
+
+	frr_config_fork();
+
+	frr_run(mm->master);
+
+	/* Not reached. */
+	return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type definitions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+/*
+ * config.h (when present) must be processed before any other header so
+ * feature-test macros take effect consistently.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <zebra.h>
+
+#include "mgmt_memory.h"
+
+/* this file is temporary in nature; definitions should be moved to the
+ * files they're used in
+ */
+
+DEFINE_MGROUP(MGMTD, "mgmt");
+DEFINE_MTYPE(MGMTD, MGMTD, "MGMTD instance");
+DEFINE_MTYPE(MGMTD, MGMTD_BE_ADPATER, "MGMTD backend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "MGMTD Frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "MGMTD Frontend Client Session");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN, "MGMTD Transaction");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "MGMTD Transaction Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ,
+	     "MGMTD Transaction Set-Config Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ,
+	     "MGMTD Transaction Commit-Config Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ,
+	     "MGMTD Transaction Get-Data Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY,
+	     "MGMTD Transaction Get-Data Replies");
+/* Fixed typo: "Gonfig" -> "Config" (string is shown in `show memory`) */
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "MGMTD Transaction Config Batches");
+DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "MGMTD commit info for tracking commits");
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type declarations
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_MEMORY_H
+#define _FRR_MGMTD_MEMORY_H
+
+#include "memory.h"
+
+/* Memory group/types for all mgmtd allocations (XCALLOC/XFREE) */
+DECLARE_MGROUP(MGMTD);
+DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_BE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_SESSION);
+DECLARE_MTYPE(MGMTD_TXN);
+DECLARE_MTYPE(MGMTD_TXN_REQ);
+DECLARE_MTYPE(MGMTD_TXN_SETCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_COMMCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REPLY);
+DECLARE_MTYPE(MGMTD_TXN_CFG_BATCH);
+/*
+ * NOTE(review): declared here but no matching DEFINE_MTYPE in
+ * mgmt_memory.c — presumably defined at its use site; verify.
+ */
+DECLARE_MTYPE(MGMTD_BE_ADAPTER_MSG_BUF);
+DECLARE_MTYPE(MGMTD_CMT_INFO);
+#endif /* _FRR_MGMTD_MEMORY_H */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "hash.h"
+#include "jhash.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_txn.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_TXN_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_TXN_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_TXN_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_txn) \
+ zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_TXN_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
+#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
+
+enum mgmt_txn_event {
+ MGMTD_TXN_PROC_SETCFG = 1,
+ MGMTD_TXN_PROC_COMMITCFG,
+ MGMTD_TXN_PROC_GETCFG,
+ MGMTD_TXN_PROC_GETDATA,
+ MGMTD_TXN_COMMITCFG_TIMEOUT,
+ MGMTD_TXN_CLEANUP
+};
+
+PREDECL_LIST(mgmt_txn_reqs);
+
+struct mgmt_set_cfg_req {
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ uint16_t num_cfg_changes;
+ bool implicit_commit;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ struct mgmt_setcfg_stats *setcfg_stats;
+};
+
+enum mgmt_commit_phase {
+ MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
+ MGMTD_COMMIT_PHASE_TXN_CREATE,
+ MGMTD_COMMIT_PHASE_SEND_CFG,
+ MGMTD_COMMIT_PHASE_APPLY_CFG,
+ MGMTD_COMMIT_PHASE_TXN_DELETE,
+ MGMTD_COMMIT_PHASE_MAX
+};
+
+/* Map a commit phase to its short display name for logs/CLI output */
+static inline const char *
+mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
+{
+	static const char *const phase_names[MGMTD_COMMIT_PHASE_MAX] = {
+		[MGMTD_COMMIT_PHASE_PREPARE_CFG] = "PREP-CFG",
+		[MGMTD_COMMIT_PHASE_TXN_CREATE] = "CREATE-TXN",
+		[MGMTD_COMMIT_PHASE_SEND_CFG] = "SEND-CFG",
+		[MGMTD_COMMIT_PHASE_APPLY_CFG] = "APPLY-CFG",
+		[MGMTD_COMMIT_PHASE_TXN_DELETE] = "DELETE-TXN",
+	};
+
+	if (cmt_phase >= MGMTD_COMMIT_PHASE_MAX || !phase_names[cmt_phase])
+		return "Invalid/Unknown";
+
+	return phase_names[cmt_phase];
+}
+
+PREDECL_LIST(mgmt_txn_batches);
+
+struct mgmt_txn_be_cfg_batch {
+ struct mgmt_txn_ctx *txn;
+ uint64_t batch_id;
+ enum mgmt_be_client_id be_id;
+ struct mgmt_be_client_adapter *be_adapter;
+ union mgmt_be_xpath_subscr_info
+ xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ size_t num_cfg_data;
+ int buf_space_left;
+ enum mgmt_commit_phase comm_phase;
+ struct mgmt_txn_batches_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
+
+#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
+ frr_each_safe (mgmt_txn_batches, list, batch)
+
+struct mgmt_commit_cfg_req {
+ Mgmtd__DatastoreId src_ds_id;
+ struct mgmt_ds_ctx *src_ds_ctx;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ uint32_t nb_txn_id;
+ uint8_t validate_only : 1;
+ uint8_t abort : 1;
+ uint8_t implicit : 1;
+ uint8_t rollback : 1;
+
+ /* Track commit phases */
+ enum mgmt_commit_phase curr_phase;
+ enum mgmt_commit_phase next_phase;
+
+ /*
+ * Set of config changes to commit. This is used only
+ * when changes are NOT to be determined by comparing
+ * candidate and running DSs. This is typically used
+ * for downloading all relevant configs for a new backend
+ * client that has recently come up and connected with
+ * MGMTD.
+ */
+ struct nb_config_cbs *cfg_chgs;
+
+ /*
+ * Details on all the Backend Clients associated with
+ * this commit.
+ */
+ struct mgmt_be_client_subscr_info subscr_info;
+
+ /*
+ * List of backend batches for this commit to be validated
+ * and applied at the backend.
+ *
+ * FIXME: Need to re-think this design for the case set of
+ * validators for a given YANG data item is different from
+ * the set of notifiers for the same. We may need to have
+ * separate list of batches for VALIDATE and APPLY.
+ */
+ struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
+ struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
+ /*
+ * The last batch added for any backend client. This is always on
+ * 'curr_batches'
+ */
+ struct mgmt_txn_be_cfg_batch
+ *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
+ struct hash *batches;
+ uint64_t next_batch_id;
+
+ struct mgmt_commit_stats *cmt_stats;
+};
+
+struct mgmt_get_data_reply {
+ /* Buffer space for preparing data reply */
+ int num_reply;
+ int last_batch;
+ Mgmtd__YangDataReply data_reply;
+ Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+};
+
+struct mgmt_get_data_req {
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+ char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+ int num_xpaths;
+
+ /*
+ * Buffer space for preparing reply.
+ * NOTE: Should only be malloc-ed on demand to reduce
+ * memory footprint. Freed up via mgmt_trx_req_free()
+ */
+ struct mgmt_get_data_reply *reply;
+
+ int total_reply;
+};
+
+struct mgmt_txn_req {
+ struct mgmt_txn_ctx *txn;
+ enum mgmt_txn_event req_event;
+ uint64_t req_id;
+ union {
+ struct mgmt_set_cfg_req *set_cfg;
+ struct mgmt_get_data_req *get_data;
+ struct mgmt_commit_cfg_req commit_cfg;
+ } req;
+
+ bool pending_be_proc;
+ struct mgmt_txn_reqs_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
+
+#define FOREACH_TXN_REQ_IN_LIST(list, req) \
+ frr_each_safe (mgmt_txn_reqs, list, req)
+
+struct mgmt_txn_ctx {
+ uint64_t session_id; /* One transaction per client session */
+ uint64_t txn_id;
+ enum mgmt_txn_type type;
+
+ /* struct mgmt_master *mm; */
+
+ struct event *proc_set_cfg;
+ struct event *proc_comm_cfg;
+ struct event *proc_get_cfg;
+ struct event *proc_get_data;
+ struct event *comm_cfg_timeout;
+ struct event *clnup;
+
+ /* List of backend adapters involved in this transaction */
+ struct mgmt_txn_badapters_head be_adapters;
+
+ int refcount;
+
+ struct mgmt_txns_item list_linkage;
+
+ /*
+ * List of pending set-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head set_cfg_reqs;
+ /*
+ * List of pending get-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head get_cfg_reqs;
+ /*
+ * List of pending get-data requests for a given
+ * transaction/session Two lists, one for requests
+ * not processed at all, and one for requests that
+ * has been sent to backend for processing.
+ */
+ struct mgmt_txn_reqs_head get_data_reqs;
+ struct mgmt_txn_reqs_head pending_get_datas;
+ /*
+ * There will always be one commit-config allowed for a given
+ * transaction/session. No need to maintain lists for it.
+ */
+ struct mgmt_txn_req *commit_cfg_req;
+};
+
+DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
+
+#define FOREACH_TXN_IN_LIST(mm, txn) \
+ frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
+
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+ enum mgmt_result result,
+ const char *error_if_any);
+
+/*
+ * Name of the current (curr == true) or next (curr == false) commit
+ * phase of the transaction; "None" if no commit is in progress.
+ */
+static inline const char *
+mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
+{
+	if (!txn->commit_cfg_req)
+		return "None";
+
+	return (mgmt_commit_phase2str(
+		curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
+		     : txn->commit_cfg_req->req.commit_cfg.next_phase));
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
+ int line);
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+ int line);
+static int
+mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
+
+static struct event_loop *mgmt_txn_tm;
+static struct mgmt_master *mgmt_txn_mm;
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+ enum mgmt_txn_event event);
+
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Allocate a backend config batch for client 'id' under the
+ * transaction's (mandatory) commit request: append it to the
+ * current-phase batch list, record it as the client's last batch,
+ * assign the next non-zero batch-id and index it in the batch hash.
+ * Takes a reference on the transaction (dropped in batch_free).
+ */
+static struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
+			 enum mgmt_be_client_id id,
+			 struct mgmt_be_client_adapter *be_adapter)
+{
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+	cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
+			   sizeof(struct mgmt_txn_be_cfg_batch));
+	assert(cfg_btch);
+	cfg_btch->be_id = id;
+
+	cfg_btch->txn = txn;
+	MGMTD_TXN_LOCK(txn);
+	assert(txn->commit_cfg_req);
+	mgmt_txn_batches_add_tail(
+		&txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
+		cfg_btch);
+	cfg_btch->be_adapter = be_adapter;
+	cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
+	if (be_adapter)
+		mgmt_be_adapter_lock(be_adapter);
+
+	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
+		cfg_btch;
+	/* Batch-id 0 is reserved; skip it on the first allocation */
+	if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
+		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+	cfg_btch->batch_id =
+		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+	hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
+		 hash_alloc_intern);
+
+	return cfg_btch;
+}
+
+/*
+ * Release a backend config batch: remove it from the commit's batch
+ * hash and from both the current- and next-phase lists, drop the
+ * adapter and transaction references, free any xpath strings it owns
+ * and finally the batch itself. NULLs the caller's pointer.
+ */
+static void
+mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
+{
+	size_t indx;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+
+	MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);
+
+	/* Only config transactions carry config batches */
+	assert((*cfg_btch)->txn
+	       && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+	cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
+	hash_release(cmtcfg_req->batches, *cfg_btch);
+	/* The batch can be on either phase list; del is safe for both */
+	mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
+			     *cfg_btch);
+	mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
+			     *cfg_btch);
+
+	if ((*cfg_btch)->be_adapter)
+		mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);
+
+	for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
+		if ((*cfg_btch)->data[indx].xpath) {
+			free((*cfg_btch)->data[indx].xpath);
+			(*cfg_btch)->data[indx].xpath = NULL;
+		}
+	}
+
+	MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);
+
+	XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
+	*cfg_btch = NULL;
+}
+
+/* Hash key for a config batch: hashed from its 64-bit batch-id */
+static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
+{
+	const struct mgmt_txn_be_cfg_batch *batch = data;
+
+	return jhash2((uint32_t *) &batch->batch_id,
+		      sizeof(batch->batch_id) / sizeof(uint32_t), 0);
+}
+
+/* Hash equality: batches match iff their batch-ids are equal */
+static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
+{
+	const struct mgmt_txn_be_cfg_batch *batch1 = d1;
+	const struct mgmt_txn_be_cfg_batch *batch2 = d2;
+
+	return (batch1->batch_id == batch2->batch_id);
+}
+
+/* hash_clean() callback: free a batch still held in the hash */
+static void mgmt_txn_cfgbatch_hash_free(void *data)
+{
+	struct mgmt_txn_be_cfg_batch *batch = data;
+
+	mgmt_txn_cfg_batch_free(&batch);
+}
+
+/*
+ * Look up a config batch by batch-id in the transaction's commit
+ * request hash. Returns NULL if no commit is in progress or no match.
+ */
+static inline struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
+{
+	struct mgmt_txn_be_cfg_batch key = {0};
+	struct mgmt_txn_be_cfg_batch *batch;
+
+	if (!txn->commit_cfg_req)
+		return NULL;
+
+	key.batch_id = batch_id;
+	batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
+			    &key);
+
+	return batch;
+}
+
+/*
+ * Free every config batch (on both the current- and next-phase lists)
+ * created for backend client 'id' under the transaction's commit
+ * request, and clear the client's last-batch pointer.
+ */
+static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
+					    enum mgmt_be_client_id id)
+{
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+	struct mgmt_txn_batches_head *list;
+
+	list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
+	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
+		mgmt_txn_cfg_batch_free(&cfg_btch);
+
+	mgmt_txn_batches_fini(list);
+
+	list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
+	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
+		mgmt_txn_cfg_batch_free(&cfg_btch);
+
+	mgmt_txn_batches_fini(list);
+
+	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
+}
+
+/*
+ * Allocate a transaction request of the given type and hook it onto
+ * the matching per-transaction request list (or the single
+ * commit_cfg_req slot). Takes a reference on the transaction which is
+ * released in mgmt_txn_req_free().
+ */
+static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
+					       uint64_t req_id,
+					       enum mgmt_txn_event req_event)
+{
+	struct mgmt_txn_req *txn_req;
+	enum mgmt_be_client_id id;
+
+	txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
+	assert(txn_req);
+	txn_req->txn = txn;
+	txn_req->req_id = req_id;
+	txn_req->req_event = req_event;
+	txn_req->pending_be_proc = false;
+
+	switch (txn_req->req_event) {
+	case MGMTD_TXN_PROC_SETCFG:
+		txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
+					       sizeof(struct mgmt_set_cfg_req));
+		assert(txn_req->req.set_cfg);
+		mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
+		MGMTD_TXN_DBG(
+			"Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+			txn_req, txn, (unsigned long long)txn->session_id);
+		break;
+	case MGMTD_TXN_PROC_COMMITCFG:
+		/* Only one commit-config request per transaction */
+		txn->commit_cfg_req = txn_req;
+		MGMTD_TXN_DBG(
+			"Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+			txn_req, txn, (unsigned long long)txn->session_id);
+
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			mgmt_txn_batches_init(
+				&txn_req->req.commit_cfg.curr_batches[id]);
+			mgmt_txn_batches_init(
+				&txn_req->req.commit_cfg.next_batches[id]);
+		}
+
+		txn_req->req.commit_cfg.batches =
+			hash_create(mgmt_txn_cfgbatch_hash_key,
+				    mgmt_txn_cfgbatch_hash_cmp,
+				    "MGMT Config Batches");
+		break;
+	case MGMTD_TXN_PROC_GETCFG:
+		txn_req->req.get_data =
+			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+				sizeof(struct mgmt_get_data_req));
+		assert(txn_req->req.get_data);
+		mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
+		MGMTD_TXN_DBG(
+			"Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+			txn_req, txn, (unsigned long long)txn->session_id);
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		txn_req->req.get_data =
+			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+				sizeof(struct mgmt_get_data_req));
+		assert(txn_req->req.get_data);
+		mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
+		MGMTD_TXN_DBG(
+			"Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
+			txn_req, txn, (unsigned long long)txn->session_id);
+		break;
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+	case MGMTD_TXN_CLEANUP:
+		/* Timer/cleanup events carry no request payload */
+		break;
+	}
+
+	MGMTD_TXN_LOCK(txn);
+
+	return txn_req;
+}
+
+/*
+ * Free a transaction request: release its event-specific payload
+ * (duplicated values/xpaths, backend batches, batch hash), inform
+ * backends of transaction deletion for in-flight commits, unhook the
+ * request from whichever list it is on, drop the transaction
+ * reference and free the request. NULLs the caller's pointer.
+ */
+static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
+{
+	int indx;
+	struct mgmt_txn_reqs_head *req_list = NULL;
+	struct mgmt_txn_reqs_head *pending_list = NULL;
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+
+	switch ((*txn_req)->req_event) {
+	case MGMTD_TXN_PROC_SETCFG:
+		/* Free the duplicated config-change values */
+		for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
+		     indx++) {
+			if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
+				MGMTD_TXN_DBG(
+					"Freeing value for %s at %p ==> '%s'",
+					(*txn_req)
+						->req.set_cfg->cfg_changes[indx]
+						.xpath,
+					(*txn_req)
+						->req.set_cfg->cfg_changes[indx]
+						.value,
+					(*txn_req)
+						->req.set_cfg->cfg_changes[indx]
+						.value);
+				free((void *)(*txn_req)
+					     ->req.set_cfg->cfg_changes[indx]
+					     .value);
+			}
+		}
+		req_list = &(*txn_req)->txn->set_cfg_reqs;
+		MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
+			      *txn_req, (*txn_req)->txn);
+		XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
+		break;
+	case MGMTD_TXN_PROC_COMMITCFG:
+		MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
+			      *txn_req, (*txn_req)->txn);
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			/*
+			 * Send TXN_DELETE to cleanup state for this
+			 * transaction on backend
+			 */
+			if ((*txn_req)->req.commit_cfg.curr_phase
+				    >= MGMTD_COMMIT_PHASE_TXN_CREATE
+			    && (*txn_req)->req.commit_cfg.curr_phase
+				       < MGMTD_COMMIT_PHASE_TXN_DELETE
+			    && (*txn_req)
+				       ->req.commit_cfg.subscr_info
+				       .xpath_subscr[id]
+				       .subscribed) {
+				adapter = mgmt_be_get_adapter_by_id(id);
+				if (adapter)
+					mgmt_txn_send_be_txn_delete(
+						(*txn_req)->txn, adapter);
+			}
+
+			mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
+							id);
+			/*
+			 * The batch hash is shared across clients; it is
+			 * destroyed once (first iteration) and NULLed so
+			 * later iterations skip it.
+			 */
+			if ((*txn_req)->req.commit_cfg.batches) {
+				hash_clean((*txn_req)->req.commit_cfg.batches,
+					   mgmt_txn_cfgbatch_hash_free);
+				hash_free((*txn_req)->req.commit_cfg.batches);
+				(*txn_req)->req.commit_cfg.batches = NULL;
+			}
+		}
+		break;
+	case MGMTD_TXN_PROC_GETCFG:
+		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+		     indx++) {
+			if ((*txn_req)->req.get_data->xpaths[indx])
+				free((void *)(*txn_req)
+					     ->req.get_data->xpaths[indx]);
+		}
+		req_list = &(*txn_req)->txn->get_cfg_reqs;
+		MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
+			      *txn_req, (*txn_req)->txn);
+		if ((*txn_req)->req.get_data->reply)
+			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+			      (*txn_req)->req.get_data->reply);
+		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+		     indx++) {
+			if ((*txn_req)->req.get_data->xpaths[indx])
+				free((void *)(*txn_req)
+					     ->req.get_data->xpaths[indx]);
+		}
+		pending_list = &(*txn_req)->txn->pending_get_datas;
+		req_list = &(*txn_req)->txn->get_data_reqs;
+		MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
+			      *txn_req, (*txn_req)->txn);
+		if ((*txn_req)->req.get_data->reply)
+			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+			      (*txn_req)->req.get_data->reply);
+		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+		break;
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+	case MGMTD_TXN_CLEANUP:
+		break;
+	}
+
+	/* Detach from the pending list (if in backend processing) else
+	 * from the regular request list.
+	 */
+	if ((*txn_req)->pending_be_proc && pending_list) {
+		mgmt_txn_reqs_del(pending_list, *txn_req);
+		MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
+			      *txn_req, (int)mgmt_txn_reqs_count(pending_list));
+	} else if (req_list) {
+		mgmt_txn_reqs_del(req_list, *txn_req);
+		MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
+			      *txn_req, (int)mgmt_txn_reqs_count(req_list));
+	}
+
+	(*txn_req)->pending_be_proc = false;
+	MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
+	XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
+	*txn_req = NULL;
+}
+
+/*
+ * Event handler: drain the transaction's set-config request queue.
+ * Each request's changes are edited into the target datastore's
+ * candidate config; on implicit commit the running DS is locked and a
+ * commit-config request is kicked off instead of replying directly.
+ * Processes at most MGMTD_TXN_MAX_NUM_SETCFG_PROC requests per
+ * invocation and reschedules itself for any remainder.
+ */
+static void mgmt_txn_process_set_cfg(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_ds_ctx *ds_ctx;
+	struct nb_config *nb_config;
+	char err_buf[1024];
+	bool error;
+	int num_processed = 0;
+	size_t left;
+	struct mgmt_commit_stats *cmt_stats;
+	int ret = 0;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+	cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
+
+	MGMTD_TXN_DBG(
+		"Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
+		(int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
+		(unsigned long long)txn->session_id);
+
+	FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
+		error = false;
+		assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
+		ds_ctx = txn_req->req.set_cfg->ds_ctx;
+		if (!ds_ctx) {
+			mgmt_fe_send_set_cfg_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.set_cfg->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, "No such datastore!",
+				txn_req->req.set_cfg->implicit_commit);
+			error = true;
+			goto mgmt_txn_process_set_cfg_done;
+		}
+
+		nb_config = mgmt_ds_get_nb_config(ds_ctx);
+		if (!nb_config) {
+			mgmt_fe_send_set_cfg_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.set_cfg->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR,
+				"Unable to retrieve DS Config Tree!",
+				txn_req->req.set_cfg->implicit_commit);
+			error = true;
+			goto mgmt_txn_process_set_cfg_done;
+		}
+
+		/* Apply all changes of this request to the candidate tree */
+		error = false;
+		nb_candidate_edit_config_changes(
+			nb_config, txn_req->req.set_cfg->cfg_changes,
+			(size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
+			NULL, 0, err_buf, sizeof(err_buf), &error);
+		if (error) {
+			mgmt_fe_send_set_cfg_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.set_cfg->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, err_buf,
+				txn_req->req.set_cfg->implicit_commit);
+			goto mgmt_txn_process_set_cfg_done;
+		}
+
+		if (txn_req->req.set_cfg->implicit_commit) {
+			/* Implicit commit is only valid as the sole request */
+			assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
+			assert(txn_req->req.set_cfg->dst_ds_ctx);
+
+			ret = mgmt_ds_write_lock(
+				txn_req->req.set_cfg->dst_ds_ctx);
+			if (ret != 0) {
+				MGMTD_TXN_ERR(
+					"Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
+					txn_req->req.set_cfg->dst_ds_id, txn,
+					(unsigned long long)txn->session_id,
+					strerror(ret));
+				mgmt_txn_send_commit_cfg_reply(
+					txn, MGMTD_DS_LOCK_FAILED,
+					"Lock running DS before implicit commit failed!");
+				goto mgmt_txn_process_set_cfg_done;
+			}
+
+			mgmt_txn_send_commit_config_req(
+				txn->txn_id, txn_req->req_id,
+				txn_req->req.set_cfg->ds_id,
+				txn_req->req.set_cfg->ds_ctx,
+				txn_req->req.set_cfg->dst_ds_id,
+				txn_req->req.set_cfg->dst_ds_ctx, false,
+				false, true);
+
+			if (mm->perf_stats_en)
+				gettimeofday(&cmt_stats->last_start, NULL);
+			cmt_stats->commit_cnt++;
+		} else if (mgmt_fe_send_set_cfg_reply(
+				   txn->session_id, txn->txn_id,
+				   txn_req->req.set_cfg->ds_id,
+				   txn_req->req_id, MGMTD_SUCCESS, NULL, false)
+			   != 0) {
+			MGMTD_TXN_ERR(
+				"Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
+				txn, (unsigned long long)txn->session_id);
+			error = true;
+		}
+
+	mgmt_txn_process_set_cfg_done:
+
+		/*
+		 * Note: The following will remove it from the list as well.
+		 */
+		mgmt_txn_req_free(&txn_req);
+
+		num_processed++;
+		if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
+			break;
+	}
+
+	/* Reschedule if the per-run processing cap left requests queued */
+	left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
+	if (left) {
+		MGMTD_TXN_DBG(
+			"Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
+			num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
+			(int)left);
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+	}
+}
+
+/*
+ * Send the final reply for a pending COMMIT-CONFIG request and perform all
+ * post-commit bookkeeping: frontend reply (commit-cfg or set-cfg style for
+ * implicit commits), datastore merge or restore, datastore unlocks,
+ * rollback resumption and transaction cleanup scheduling.
+ *
+ * Returns 0 on success, -1 if no commit request is pending on 'txn'.
+ */
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+					  enum mgmt_result result,
+					  const char *error_if_any)
+{
+	int ret = 0;
+	bool success, create_cmt_info_rec;
+
+	if (!txn->commit_cfg_req)
+		return -1;
+
+	/* MGMTD_NO_CFG_CHANGES is not a failure from the user's viewpoint. */
+	success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
+
+	/*
+	 * Explicit commits from a frontend session get a COMMIT-CONFIG reply;
+	 * implicit ones (below) get a SET-CONFIG reply instead.
+	 */
+	if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
+	    && mgmt_fe_send_commit_cfg_reply(
+		       txn->session_id, txn->txn_id,
+		       txn->commit_cfg_req->req.commit_cfg.src_ds_id,
+		       txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
+		       txn->commit_cfg_req->req_id,
+		       txn->commit_cfg_req->req.commit_cfg.validate_only,
+		       result, error_if_any)
+		       != 0) {
+		MGMTD_TXN_ERR(
+			"Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
+			txn, (unsigned long long)txn->session_id);
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
+	    && mgmt_fe_send_set_cfg_reply(
+		       txn->session_id, txn->txn_id,
+		       txn->commit_cfg_req->req.commit_cfg.src_ds_id,
+		       txn->commit_cfg_req->req_id,
+		       success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
+		       error_if_any, true)
+		       != 0) {
+		MGMTD_TXN_ERR(
+			"Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
+			txn, (unsigned long long)txn->session_id);
+	}
+
+	if (success) {
+		/* Stop the commit-timeout timer */
+		EVENT_OFF(txn->comm_cfg_timeout);
+
+		/* Record commit history only for real (non-rollback) changes. */
+		create_cmt_info_rec =
+			(result != MGMTD_NO_CFG_CHANGES &&
+			 !txn->commit_cfg_req->req.commit_cfg.rollback);
+
+		/*
+		 * Successful commit: Merge Src DS into Dst DS if and only if
+		 * this was not a validate-only or abort request.
+		 */
+		if ((txn->session_id
+		     && !txn->commit_cfg_req->req.commit_cfg.validate_only
+		     && !txn->commit_cfg_req->req.commit_cfg.abort)
+		    || txn->commit_cfg_req->req.commit_cfg.rollback) {
+			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+						 .src_ds_ctx,
+					 txn->commit_cfg_req->req.commit_cfg
+						 .dst_ds_ctx,
+					 create_cmt_info_rec);
+		}
+
+		/*
+		 * Restore Src DS back to Dest DS only through a commit abort
+		 * request.
+		 */
+		if (txn->session_id
+		    && txn->commit_cfg_req->req.commit_cfg.abort)
+			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+						 .dst_ds_ctx,
+					 txn->commit_cfg_req->req.commit_cfg
+						 .src_ds_ctx,
+					 false);
+	} else {
+		/*
+		 * The commit has failed. For implicit commit requests restore
+		 * back the contents of the candidate DS.
+		 */
+		if (txn->commit_cfg_req->req.commit_cfg.implicit)
+			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+						 .dst_ds_ctx,
+					 txn->commit_cfg_req->req.commit_cfg
+						 .src_ds_ctx,
+					 false);
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.rollback) {
+		ret = mgmt_ds_unlock(
+			txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+		if (ret != 0)
+			MGMTD_TXN_ERR(
+				"Failed to unlock the dst DS during rollback : %s",
+				strerror(ret));
+
+		/*
+		 * Resume processing the rollback command.
+		 */
+		mgmt_history_rollback_complete(success);
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.implicit) {
+		/*
+		 * Capture this unlock's own return value; previously a stale
+		 * 'ret' (from the rollback path above, or 0) was passed to
+		 * strerror() here, hiding the real errno.
+		 */
+		ret = mgmt_ds_unlock(
+			txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+		if (ret != 0)
+			MGMTD_TXN_ERR(
+				"Failed to unlock the dst DS during implicit : %s",
+				strerror(ret));
+	}
+
+	txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
+	mgmt_txn_req_free(&txn->commit_cfg_req);
+
+	/*
+	 * The CONFIG Transaction should be destroyed from Frontend-adapter.
+	 * But in case the transaction is not triggered from a front-end session
+	 * we need to cleanup by itself.
+	 */
+	if (!txn->session_id)
+		mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
+
+	return 0;
+}
+
+/*
+ * Detach one config batch from 'src_list' and append it to 'dst_list',
+ * optionally stamping it with the new commit phase 'to_phase'.
+ */
+static void
+mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
+				struct mgmt_txn_be_cfg_batch *cfg_btch,
+				struct mgmt_txn_batches_head *src_list,
+				struct mgmt_txn_batches_head *dst_list,
+				bool update_commit_phase,
+				enum mgmt_commit_phase to_phase)
+{
+	mgmt_txn_batches_del(src_list, cfg_btch);
+
+	if (!update_commit_phase) {
+		mgmt_txn_batches_add_tail(dst_list, cfg_btch);
+		return;
+	}
+
+	MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
+		      cfg_btch->txn, cfg_btch,
+		      mgmt_commit_phase2str(cfg_btch->comm_phase),
+		      mgmt_txn_commit_phase_str(cfg_btch->txn, false));
+	cfg_btch->comm_phase = to_phase;
+
+	mgmt_txn_batches_add_tail(dst_list, cfg_btch);
+}
+
+/*
+ * Move every config batch on 'src_list' over to 'dst_list', updating each
+ * batch's commit phase when requested.
+ */
+static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
+				      struct mgmt_commit_cfg_req *cmtcfg_req,
+				      struct mgmt_txn_batches_head *src_list,
+				      struct mgmt_txn_batches_head *dst_list,
+				      bool update_commit_phase,
+				      enum mgmt_commit_phase to_phase)
+{
+	struct mgmt_txn_be_cfg_batch *batch;
+
+	FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, batch)
+		mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, batch, src_list,
+						dst_list, update_commit_phase,
+						to_phase);
+}
+
+/*
+ * Try to advance the whole commit to its next phase. The commit may only
+ * move once EVERY subscribed backend client has drained its current-phase
+ * batch list. Returns 0 if the phase was advanced (and re-schedules
+ * MGMTD_TXN_PROC_COMMITCFG), -1 if at least one client is still pending.
+ */
+static int
+mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+				   struct mgmt_commit_cfg_req *cmtcfg_req)
+{
+	struct mgmt_txn_batches_head *curr_list, *next_list;
+	enum mgmt_be_client_id id;
+
+	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+
+	/*
+	 * Check if all clients has moved to next phase or not.
+	 */
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
+			mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
+			/*
+			 * There's at least one client who hasn't moved to
+			 * next phase.
+			 *
+			 * TODO: Need to re-think this design for the case
+			 * set of validators for a given YANG data item is
+			 * different from the set of notifiers for the same.
+			 */
+			return -1;
+		}
+	}
+
+	MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+
+	/*
+	 * If we are here, it means all the clients has moved to next phase.
+	 * So we can move the whole commit to next phase.
+	 */
+	cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
+	cmtcfg_req->next_phase++;
+	MGMTD_TXN_DBG(
+		"Move back all config batches for Txn %p from next to current branch",
+		txn);
+	/* Batches parked on 'next' become the new current-phase work. */
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		curr_list = &cmtcfg_req->curr_batches[id];
+		next_list = &cmtcfg_req->next_batches[id];
+		mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
+					  curr_list, false, 0);
+	}
+
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+	return 0;
+}
+
+/*
+ * One backend client ('adapter') has finished the current commit phase:
+ * move all of its config batches from the current to the next list, then
+ * try to advance the overall commit (which only succeeds once every
+ * subscribed client is done). Returns -1 unless 'txn' is a CONFIG
+ * transaction with a pending commit request, else 0.
+ */
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+				  struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_batches_head *curr_list, *next_list;
+
+	if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
+		return -1;
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+	MGMTD_TXN_DBG(
+		"Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
+		adapter->name, mgmt_txn_commit_phase_str(txn, true),
+		mgmt_txn_commit_phase_str(txn, false));
+
+	MGMTD_TXN_DBG(
+		"Move all config batches for '%s' from current to next list",
+		adapter->name);
+	curr_list = &cmtcfg_req->curr_batches[adapter->id];
+	next_list = &cmtcfg_req->next_batches[adapter->id];
+	mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
+				  cmtcfg_req->next_phase);
+
+	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+
+	/*
+	 * Check if all clients has moved to next phase or not.
+	 */
+	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+	return 0;
+}
+
+/*
+ * Split the computed set of config changes ('changes') into per-backend
+ * config batches hanging off txn_req->req.commit_cfg. Each changed xpath
+ * is handed to every backend client subscribed to it (for validation
+ * and/or notification); a fresh batch is allocated whenever the current
+ * one is full or out of buffer space. Returns 0 on success, -1 on failure
+ * (a commit-config error reply is sent before returning).
+ */
+static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
+					  struct nb_config_cbs *changes)
+{
+	struct nb_config_cb *cb, *nxt;
+	struct nb_config_change *chg;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+	struct mgmt_be_client_subscr_info subscr_info;
+	char *xpath = NULL, *value = NULL;
+	char err_buf[1024];
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	bool found_validator;
+	int num_chgs = 0;
+	int xpath_len, value_len;
+
+	cmtcfg_req = &txn_req->req.commit_cfg;
+
+	RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
+		chg = (struct nb_config_change *)cb;
+
+		/*
+		 * Could have directly pointed to xpath in nb_node.
+		 * But dont want to mess with it now.
+		 * xpath = chg->cb.nb_node->xpath;
+		 */
+		xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
+		if (!xpath) {
+			(void)mgmt_txn_send_commit_cfg_reply(
+				txn_req->txn, MGMTD_INTERNAL_ERROR,
+				"Internal error! Could not get Xpath from Ds node!");
+			goto mgmt_txn_create_config_batches_failed;
+		}
+
+		/* Non-terminal (container) nodes carry a placeholder value. */
+		value = (char *)lyd_get_value(chg->cb.dnode);
+		if (!value)
+			value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
+
+		MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
+			      value ? value : "NIL");
+
+		if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
+		    != 0) {
+			snprintf(err_buf, sizeof(err_buf),
+				 "No backend module found for XPATH: '%s",
+				 xpath);
+			(void)mgmt_txn_send_commit_cfg_reply(
+				txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
+			goto mgmt_txn_create_config_batches_failed;
+		}
+
+		xpath_len = strlen(xpath) + 1;
+		value_len = strlen(value) + 1;
+		found_validator = false;
+		/* Fan this change out to every subscribed backend client. */
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			if (!subscr_info.xpath_subscr[id].validate_config
+			    && !subscr_info.xpath_subscr[id].notify_config)
+				continue;
+
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (!adapter)
+				continue;
+
+			/* Start a new batch when the current one is full. */
+			cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
+			if (!cfg_btch
+			    || (cfg_btch->num_cfg_data
+				== MGMTD_MAX_CFG_CHANGES_IN_BATCH)
+			    || (cfg_btch->buf_space_left
+				< (xpath_len + value_len))) {
+				/* Allocate a new config batch */
+				cfg_btch = mgmt_txn_cfg_batch_alloc(
+					txn_req->txn, id, adapter);
+			}
+
+			cfg_btch->buf_space_left -= (xpath_len + value_len);
+			memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
+			       &subscr_info.xpath_subscr[id],
+			       sizeof(cfg_btch->xp_subscr[0]));
+
+			mgmt_yang_cfg_data_req_init(
+				&cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
+			cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
+				&cfg_btch->cfg_data[cfg_btch->num_cfg_data];
+
+			if (chg->cb.operation == NB_OP_DESTROY)
+				cfg_btch->cfg_data[cfg_btch->num_cfg_data]
+					.req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+			else
+				cfg_btch->cfg_data[cfg_btch->num_cfg_data]
+					.req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+
+			mgmt_yang_data_init(
+				&cfg_btch->data[cfg_btch->num_cfg_data]);
+			cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
+				&cfg_btch->data[cfg_btch->num_cfg_data];
+			/* Ownership of 'xpath' moves to the batch here. */
+			cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
+			xpath = NULL;
+
+			mgmt_yang_data_value_init(
+				&cfg_btch->value[cfg_btch->num_cfg_data]);
+			cfg_btch->data[cfg_btch->num_cfg_data].value =
+				&cfg_btch->value[cfg_btch->num_cfg_data];
+			cfg_btch->value[cfg_btch->num_cfg_data].value_case =
+				MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+			cfg_btch->value[cfg_btch->num_cfg_data]
+				.encoded_str_val = value;
+			value = NULL;
+
+			if (subscr_info.xpath_subscr[id].validate_config)
+				found_validator = true;
+
+			cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
+				subscr_info.xpath_subscr[id].subscribed;
+			MGMTD_TXN_DBG(
+				" -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
+				adapter->name,
+				subscr_info.xpath_subscr[id].validate_config,
+				subscr_info.xpath_subscr[id].notify_config,
+				cfg_btch, (int)cfg_btch->num_cfg_data);
+
+			cfg_btch->num_cfg_data++;
+			num_chgs++;
+		}
+
+		/*
+		 * NOTE(review): if any client consumed this change above,
+		 * 'xpath' is NULL by now, so the "%s" below would format a
+		 * NULL pointer — confirm and use a saved copy if needed.
+		 * Conversely, if NO adapter consumed it, 'xpath' is
+		 * overwritten on the next iteration — possible leak; verify.
+		 */
+		if (!found_validator) {
+			snprintf(err_buf, sizeof(err_buf),
+				 "No validator module found for XPATH: '%s",
+				 xpath);
+			MGMTD_TXN_ERR("***** %s", err_buf);
+		}
+	}
+
+	cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
+	if (!num_chgs) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn_req->txn, MGMTD_NO_CFG_CHANGES,
+			"No changes found to commit!");
+		goto mgmt_txn_create_config_batches_failed;
+	}
+
+	cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
+	return 0;
+
+mgmt_txn_create_config_batches_failed:
+
+	if (xpath)
+		free(xpath);
+
+	return -1;
+}
+
+/*
+ * Validate and prepare a COMMIT-CONFIG request: sanity-check the source
+ * (must be CANDIDATE) and destination (must be RUNNING) datastores,
+ * compute the set of config changes (from a caller-provided list, the
+ * candidate's scratch buffer, or a full candidate-vs-running diff),
+ * optionally run local YANG/code validation, then create the per-backend
+ * config batches and schedule the TXN_CREATE phase plus the commit
+ * timeout. Returns 0 on success, -1 on any failure (an error reply is
+ * sent before returning).
+ */
+static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
+{
+	struct nb_context nb_ctx;
+	struct nb_config *nb_config;
+	struct nb_config_cbs changes;
+	struct nb_config_cbs *cfg_chgs = NULL;
+	int ret;
+	bool del_cfg_chgs = false;
+
+	ret = 0;
+	memset(&nb_ctx, 0, sizeof(nb_ctx));
+	memset(&changes, 0, sizeof(changes));
+	/* Caller-supplied change list (e.g. rollback): skip validation. */
+	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
+		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
+		del_cfg_chgs = true;
+		goto mgmt_txn_prep_config_validation_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
+	    != MGMTD_DS_CANDIDATE) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM,
+			"Source DS cannot be any other than CANDIDATE!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
+	    != MGMTD_DS_RUNNING) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM,
+			"Destination DS cannot be any other than RUNNING!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM, "No such source datastore!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM,
+			"No such destination datastore!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.abort) {
+		/*
+		 * This is a commit abort request. Return back success.
+		 * That should trigger a restore of Candidate datastore to
+		 * Running.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+						      NULL);
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	nb_config = mgmt_ds_get_nb_config(
+		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+	if (!nb_config) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INTERNAL_ERROR,
+			"Unable to retrieve Commit DS Config Tree!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	/*
+	 * Check for diffs from scratch buffer. If found empty
+	 * get the diff from Candidate DS itself.
+	 */
+	cfg_chgs = &nb_config->cfg_chgs;
+	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+		/*
+		 * This could be the case when the config is directly
+		 * loaded onto the candidate DS from a file. Get the
+		 * diff from a full comparison of the candidate and
+		 * running DSs.
+		 */
+		nb_config_diff(
+			mgmt_ds_get_nb_config(txn->commit_cfg_req->req
+						      .commit_cfg.dst_ds_ctx),
+			nb_config, &changes);
+		cfg_chgs = &changes;
+		del_cfg_chgs = true;
+	}
+
+	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+		/*
+		 * This means there's no changes to commit whatsoever
+		 * is the source of the changes in config.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_NO_CFG_CHANGES,
+			"No changes found to be committed!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+	if (mm->perf_stats_en)
+		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+				      ->validate_start,
+			     NULL);
+	/*
+	 * Validate YANG contents of the source DS and get the diff
+	 * between source and destination DS contents.
+	 */
+	char err_buf[1024] = {0};
+	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
+	nb_ctx.user = (void *)txn;
+	ret = nb_candidate_validate_yang(nb_config, false, err_buf,
+					 sizeof(err_buf) - 1);
+	if (ret != NB_OK) {
+		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						      err_buf);
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+	/*
+	 * Perform application level validations locally on the MGMTD
+	 * process by calling application specific validation routines
+	 * loaded onto MGMTD process using libraries.
+	 */
+	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
+					 sizeof(err_buf) - 1);
+	if (ret != NB_OK) {
+		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						      err_buf);
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
+		/*
+		 * This was a validate-only COMMIT request return success.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+						      NULL);
+		goto mgmt_txn_prepare_config_done;
+	}
+#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+
+mgmt_txn_prep_config_validation_done:
+
+	if (mm->perf_stats_en)
+		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+				      ->prep_cfg_start,
+			     NULL);
+
+	/*
+	 * Iterate over the diffs and create ordered batches of config
+	 * commands to be validated.
+	 */
+	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
+	if (ret != 0) {
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	/* Move to the Transaction Create Phase */
+	txn->commit_cfg_req->req.commit_cfg.curr_phase =
+		MGMTD_COMMIT_PHASE_TXN_CREATE;
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+	/*
+	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
+	 * backend.
+	 */
+	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
+mgmt_txn_prepare_config_done:
+
+	if (cfg_chgs && del_cfg_chgs)
+		nb_config_diff_del_changes(cfg_chgs);
+
+	return ret;
+}
+
+/*
+ * Send TXN_CREATE_REQ to every backend client subscribed to this commit
+ * and mark all of their batches as being in the TXN_CREATE phase. The
+ * commit does not advance until the TXN_REPLYs arrive. Returns 0 on
+ * success, -1 if any send failed (error reply already sent).
+ */
+static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
+{
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
+			/*
+			 * NOTE(review): 'adapter' is not NULL-checked before
+			 * the call below — confirm mgmt_be_create_txn()
+			 * tolerates a NULL adapter.
+			 */
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (mgmt_be_create_txn(adapter, txn->txn_id)
+			    != 0) {
+				(void)mgmt_txn_send_commit_cfg_reply(
+					txn, MGMTD_INTERNAL_ERROR,
+					"Could not send TXN_CREATE to backend adapter");
+				return -1;
+			}
+
+			FOREACH_TXN_CFG_BATCH_IN_LIST (
+				&txn->commit_cfg_req->req.commit_cfg
+					 .curr_batches[id],
+				cfg_btch)
+				cfg_btch->comm_phase =
+					MGMTD_COMMIT_PHASE_TXN_CREATE;
+		}
+	}
+
+	txn->commit_cfg_req->req.commit_cfg.next_phase =
+		MGMTD_COMMIT_PHASE_SEND_CFG;
+
+	/*
+	 * Dont move the commit to next phase yet. Wait for the TXN_REPLY to
+	 * come back.
+	 */
+
+	MGMTD_TXN_DBG(
+		"Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
+		(unsigned long long)txn->session_id,
+		mgmt_txn_commit_phase_str(txn, true),
+		mgmt_txn_commit_phase_str(txn, false));
+
+	return 0;
+}
+
+/*
+ * Push all pending CFGDATA_CREATE_REQs for one backend client, flagging
+ * the last batch so the client knows the stream is complete, and move each
+ * sent batch into the SEND_CFG phase. May advance the whole commit if this
+ * was the last client waiting to be serviced. Returns 0 on success, -1 on
+ * a send failure (error reply already sent).
+ */
+static int
+mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+			  struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+	struct mgmt_be_cfgreq cfg_req = {0};
+	size_t num_batches, indx;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
+
+	indx = 0;
+	num_batches =
+		mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
+	FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
+				       cfg_btch) {
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
+
+		cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
+		cfg_req.num_reqs = cfg_btch->num_cfg_data;
+		indx++;
+		/* 'indx == num_batches' marks the final batch of the stream. */
+		if (mgmt_be_send_cfg_data_create_req(
+			    adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
+			    indx == num_batches ? true : false)
+		    != 0) {
+			(void)mgmt_txn_send_commit_cfg_reply(
+				txn, MGMTD_INTERNAL_ERROR,
+				"Internal Error! Could not send config data to backend!");
+			MGMTD_TXN_ERR(
+				"Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s",
+				txn, cfg_btch, adapter->name);
+			return -1;
+		}
+
+		cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
+		mgmt_move_txn_cfg_batch_to_next(
+			cmtcfg_req, cfg_btch,
+			&cmtcfg_req->curr_batches[adapter->id],
+			&cmtcfg_req->next_batches[adapter->id], true,
+			MGMTD_COMMIT_PHASE_SEND_CFG);
+	}
+
+	/*
+	 * This could be the last Backend Client to send CFGDATA_CREATE_REQ to.
+	 * Try moving the commit to next phase.
+	 */
+	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+	return 0;
+}
+
+/*
+ * Send TXN_DELETE_REQ to one backend client (if it was subscribed to this
+ * commit) and mark its current batches as being in the TXN_DELETE phase.
+ * Always returns 0; the destroy itself is best-effort.
+ */
+static int
+mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+			    struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
+		/*
+		 * NOTE(review): re-fetching the adapter by its own id looks
+		 * redundant — confirm before simplifying.
+		 */
+		adapter = mgmt_be_get_adapter_by_id(adapter->id);
+		(void)mgmt_be_destroy_txn(adapter, txn->txn_id);
+
+		FOREACH_TXN_CFG_BATCH_IN_LIST (
+			&txn->commit_cfg_req->req.commit_cfg
+				 .curr_batches[adapter->id],
+			cfg_btch)
+			cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
+	}
+
+	return 0;
+}
+
+/*
+ * Timer handler: the backend did not complete the commit in time. Fail
+ * the commit with an error reply; the transaction itself is cleaned up by
+ * the frontend adapter.
+ */
+static void mgmt_txn_cfg_commit_timedout(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+
+	assert(txn);
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+	/* Nothing to abort if the commit already finished. */
+	if (!txn->commit_cfg_req)
+		return;
+
+	MGMTD_TXN_ERR(
+		"Backend operations for Config Txn %p has timedout! Aborting commit!!",
+		txn);
+
+	/* Reply with failure; cleanup is driven from the frontend adapter. */
+	mgmt_txn_send_commit_cfg_reply(
+		txn, MGMTD_INTERNAL_ERROR,
+		"Operation on the backend timed-out. Aborting commit!");
+}
+
+/*
+ * Send CFG_APPLY_REQs to all the backend client.
+ *
+ * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
+ * for all backend clients has been generated. Please see
+ * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
+ * for details.
+ *
+ * Validate-only commits short-circuit here with a success reply; no
+ * apply is sent. Returns 0 on success, -1 on failure (reply sent).
+ */
+static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
+{
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_batches_head *btch_list;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	if (cmtcfg_req->validate_only) {
+		/*
+		 * If this was a validate-only COMMIT request return success.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+						      NULL);
+		return 0;
+	}
+
+	/* Only clients that asked to be notified get an apply request. */
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (!adapter)
+				return -1;
+
+			btch_list = &cmtcfg_req->curr_batches[id];
+			if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
+			    != 0) {
+				(void)mgmt_txn_send_commit_cfg_reply(
+					txn, MGMTD_INTERNAL_ERROR,
+					"Could not send CFG_APPLY_REQ to backend adapter");
+				return -1;
+			}
+			cmtcfg_req->cmt_stats->last_num_apply_reqs++;
+
+			/* Running config changed; adapter needs a re-sync. */
+			UNSET_FLAG(adapter->flags,
+				   MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+
+			FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
+				cfg_btch->comm_phase =
+					MGMTD_COMMIT_PHASE_APPLY_CFG;
+		}
+	}
+
+	txn->commit_cfg_req->req.commit_cfg.next_phase =
+		MGMTD_COMMIT_PHASE_TXN_DELETE;
+
+	/*
+	 * Dont move the commit to next phase yet. Wait for all VALIDATE_REPLIES
+	 * to come back.
+	 */
+
+	return 0;
+}
+
+/*
+ * Commit-config state-machine driver, dispatched via the
+ * MGMTD_TXN_PROC_COMMITCFG event. Executes the action for the commit's
+ * CURRENT phase; phase transitions themselves happen as backend replies
+ * arrive (see mgmt_try_move_commit_to_next_phase()).
+ */
+static void mgmt_txn_process_commit_cfg(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG(
+		"Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
+		txn, (unsigned long long)txn->session_id,
+		mgmt_txn_commit_phase_str(txn, true),
+		mgmt_txn_commit_phase_str(txn, false));
+
+	assert(txn->commit_cfg_req);
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	switch (cmtcfg_req->curr_phase) {
+	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+		mgmt_txn_prepare_config(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_TXN_CREATE:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
+				     NULL);
+		/*
+		 * Send TXN_CREATE_REQ to all Backend now.
+		 */
+		mgmt_txn_send_be_txn_create(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_SEND_CFG:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
+				     NULL);
+		/*
+		 * All CFGDATA_CREATE_REQ should have been sent to
+		 * Backend by now. Nothing more to do here except wait.
+		 */
+#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+		MGMTD_TXN_DBG(
+			"Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
+			txn, (unsigned long long)txn->session_id);
+#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+		MGMTD_TXN_DBG(
+			"Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
+			txn, (unsigned long long)txn->session_id);
+#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+		break;
+	case MGMTD_COMMIT_PHASE_APPLY_CFG:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
+				     NULL);
+		/*
+		 * We should have received successful CFG_VALIDATE_REPLY from
+		 * all concerned Backend Clients by now. Send out the
+		 * CFG_APPLY_REQs now.
+		 */
+		mgmt_txn_send_be_cfg_apply(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_TXN_DELETE:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
+				     NULL);
+		/*
+		 * We would have sent TXN_DELETE_REQ to all backend by now.
+		 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
+		 * NOTE: This should also trigger DS merge/unlock and Txn
+		 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
+		 * more details.
+		 */
+		EVENT_OFF(txn->comm_cfg_timeout);
+		mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+		break;
+	case MGMTD_COMMIT_PHASE_MAX:
+		break;
+	}
+
+	MGMTD_TXN_DBG(
+		"Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
+		txn, (unsigned long long)txn->session_id,
+		mgmt_txn_commit_phase_str(txn, true),
+		mgmt_txn_commit_phase_str(txn, false));
+}
+
+/* Wire each reply_datap slot to its corresponding reply_data entry. */
+static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+	size_t i;
+
+	for (i = 0; i < array_size(get_reply->reply_data); i++)
+		get_reply->reply_datap[i] = &get_reply->reply_data[i];
+}
+
+/*
+ * Release any xpath strings held by the reply buffer, zero all reply
+ * state, then re-wire the pointer array for the next batch.
+ */
+static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+	int i;
+
+	for (i = 0; i < get_reply->num_reply; i++) {
+		if (get_reply->reply_xpathp[i]) {
+			free(get_reply->reply_xpathp[i]);
+			get_reply->reply_xpathp[i] = NULL;
+		}
+		if (get_reply->reply_data[i].xpath) {
+			zlog_debug("%s free xpath %p", __func__,
+				   get_reply->reply_data[i].xpath);
+			free(get_reply->reply_data[i].xpath);
+			get_reply->reply_data[i].xpath = NULL;
+		}
+	}
+
+	get_reply->num_reply = 0;
+	memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
+	memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
+	memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
+	memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
+
+	mgmt_init_get_data_reply(get_reply);
+}
+
+/* Reset the reply buffer of 'get_data', if one has been allocated. */
+static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
+{
+	if (!get_data->reply)
+		return;
+
+	mgmt_reset_get_data_reply(get_data->reply);
+}
+
+/*
+ * Flush the accumulated GET-CONFIG/GET-DATA reply batch of 'get_req' to
+ * the frontend session and reset the reply buffer for the next batch.
+ * A next_indx of -1 tells the frontend this was the last batch.
+ */
+static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
+					    struct mgmt_get_data_req *get_req)
+{
+	struct mgmt_get_data_reply *get_reply;
+	Mgmtd__YangDataReply *data_reply;
+
+	get_reply = get_req->reply;
+	if (!get_reply)
+		return;
+
+	data_reply = &get_reply->data_reply;
+	mgmt_yang_data_reply_init(data_reply);
+	data_reply->n_data = get_reply->num_reply;
+	data_reply->data = get_reply->reply_datap;
+	data_reply->next_indx =
+		(!get_reply->last_batch ? get_req->total_reply : -1);
+
+	MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
+		      (int) data_reply->n_data,
+		      (long long)data_reply->next_indx);
+
+	/* The reply flavor depends on which request type is being served. */
+	switch (txn_req->req_event) {
+	case MGMTD_TXN_PROC_GETCFG:
+		if (mgmt_fe_send_get_cfg_reply(
+			    txn_req->txn->session_id, txn_req->txn->txn_id,
+			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
+			    data_reply, NULL)
+		    != 0) {
+			MGMTD_TXN_ERR(
+				"Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
+				txn_req->txn,
+				(unsigned long long)txn_req->txn->session_id,
+				(unsigned long long)txn_req->req_id);
+		}
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		if (mgmt_fe_send_get_data_reply(
+			    txn_req->txn->session_id, txn_req->txn->txn_id,
+			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
+			    data_reply, NULL)
+		    != 0) {
+			MGMTD_TXN_ERR(
+				"Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
+				txn_req->txn,
+				(unsigned long long)txn_req->txn->session_id,
+				(unsigned long long)txn_req->req_id);
+		}
+		break;
+	case MGMTD_TXN_PROC_SETCFG:
+	case MGMTD_TXN_PROC_COMMITCFG:
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+	case MGMTD_TXN_CLEANUP:
+		MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
+			      txn_req->req_event);
+		break;
+	}
+
+	/*
+	 * Reset reply buffer for next reply.
+	 */
+	mgmt_reset_get_data_reply_buf(get_req);
+}
+
+/*
+ * Callback for mgmt_ds_iter_data(): append one terminal (leaf) node's
+ * xpath/value pair to the in-progress reply, flushing the batch once it
+ * is full. Takes ownership of 'xpath' — it is freed on the ignore path,
+ * otherwise handed to the reply buffer (freed on reply reset).
+ */
+static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
+						 char *xpath,
+						 struct lyd_node *node,
+						 struct nb_node *nb_node,
+						 void *ctx)
+{
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_get_data_req *get_req;
+	struct mgmt_get_data_reply *get_reply;
+	Mgmtd__YangData *data;
+	Mgmtd__YangDataValue *data_value;
+
+	txn_req = (struct mgmt_txn_req *)ctx;
+	if (!txn_req)
+		goto mgmtd_ignore_get_cfg_reply_data;
+
+	/* Only leaf/leaf-list (terminal) nodes carry reportable values. */
+	if (!(node->schema->nodetype & LYD_NODE_TERM))
+		goto mgmtd_ignore_get_cfg_reply_data;
+
+	assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
+	       || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+
+	get_req = txn_req->req.get_data;
+	assert(get_req);
+	get_reply = get_req->reply;
+	data = &get_reply->reply_data[get_reply->num_reply];
+	data_value = &get_reply->reply_value[get_reply->num_reply];
+
+	mgmt_yang_data_init(data);
+	data->xpath = xpath;
+	mgmt_yang_data_value_init(data_value);
+	data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+	data_value->encoded_str_val = (char *)lyd_get_value(node);
+	data->value = data_value;
+
+	get_reply->num_reply++;
+	get_req->total_reply++;
+	MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
+		      data->xpath, data_value->encoded_str_val);
+
+	/* Flush once the batch is full; buffer is reset for the next one. */
+	if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
+		mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
+
+	return;
+
+mgmtd_ignore_get_cfg_reply_data:
+	if (xpath)
+		free(xpath);
+}
+
+/*
+ * Serve a GET-CONFIG/GET-DATA request directly from the datastore (no
+ * backend round-trip): iterate each requested xpath and stream the results
+ * back to the frontend in batches. The request is freed (or parked on a
+ * pending list) before returning. Always returns 0.
+ */
+static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
+			       struct mgmt_txn_req *txn_req,
+			       struct mgmt_ds_ctx *ds_ctx)
+{
+	struct mgmt_txn_reqs_head *req_list = NULL;
+	struct mgmt_txn_reqs_head *pending_list = NULL;
+	int indx;
+	struct mgmt_get_data_req *get_data;
+	struct mgmt_get_data_reply *get_reply;
+
+	switch (txn_req->req_event) {
+	case MGMTD_TXN_PROC_GETCFG:
+		req_list = &txn->get_cfg_reqs;
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		req_list = &txn->get_data_reqs;
+		break;
+	case MGMTD_TXN_PROC_SETCFG:
+	case MGMTD_TXN_PROC_COMMITCFG:
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+	case MGMTD_TXN_CLEANUP:
+		assert(!"Wrong txn request type!");
+		break;
+	}
+
+	get_data = txn_req->req.get_data;
+
+	if (!get_data->reply) {
+		get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+					  sizeof(struct mgmt_get_data_reply));
+		/*
+		 * NOTE(review): FRR's XCALLOC aborts on OOM, so this NULL
+		 * check is likely dead code — confirm.
+		 */
+		if (!get_data->reply) {
+			mgmt_fe_send_get_cfg_reply(
+				txn->session_id, txn->txn_id,
+				get_data->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, NULL,
+				"Internal error: Unable to allocate reply buffers!");
+			goto mgmt_txn_get_config_failed;
+		}
+	}
+
+	/*
+	 * Read data contents from the DS and respond back directly.
+	 * No need to go to backend for getting data.
+	 */
+	get_reply = get_data->reply;
+	for (indx = 0; indx < get_data->num_xpaths; indx++) {
+		MGMTD_TXN_DBG("Trying to get all data under '%s'",
+			      get_data->xpaths[indx]);
+		mgmt_init_get_data_reply(get_reply);
+		if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
+				      mgmt_txn_iter_and_send_get_cfg_reply,
+				      (void *)txn_req, true)
+		    == -1) {
+			MGMTD_TXN_DBG("Invalid Xpath '%s",
+				      get_data->xpaths[indx]);
+			mgmt_fe_send_get_cfg_reply(
+				txn->session_id, txn->txn_id,
+				get_data->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
+			goto mgmt_txn_get_config_failed;
+		}
+		MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
+			      get_reply->num_reply, get_data->xpaths[indx]);
+		get_reply->last_batch = true;
+		mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
+	}
+
+mgmt_txn_get_config_failed:
+
+	/*
+	 * NOTE(review): 'pending_list' is never assigned in this function,
+	 * so this branch appears unreachable today — confirm whether it is
+	 * a placeholder for future backend-assisted GETs.
+	 */
+	if (pending_list) {
+		/*
+		 * Move the transaction to corresponding pending list.
+		 */
+		if (req_list)
+			mgmt_txn_reqs_del(req_list, txn_req);
+		txn_req->pending_be_proc = true;
+		mgmt_txn_reqs_add_tail(pending_list, txn_req);
+		MGMTD_TXN_DBG(
+			"Moved Req: %p for Txn: %p from Req-List to Pending-List",
+			txn_req, txn_req->txn);
+	} else {
+		/*
+		 * Delete the txn request. It will also remove it from request
+		 * list.
+		 */
+		mgmt_txn_req_free(&txn_req);
+	}
+
+	return 0;
+}
+
+/*
+ * Event handler: service the queued GET_CONFIG requests of a transaction.
+ *
+ * At most MGMTD_TXN_MAX_NUM_GETCFG_PROC requests are handled per invocation;
+ * if any remain afterwards the handler re-schedules itself.
+ */
+static void mgmt_txn_process_get_cfg(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_ds_ctx *ds_ctx;
+	int num_processed = 0;
+	bool error;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG(
+		"Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
+		(int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
+		(unsigned long long)txn->session_id);
+
+	FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
+		error = false;
+		assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
+		ds_ctx = txn_req->req.get_data->ds_ctx;
+		if (!ds_ctx) {
+			/* No datastore handle: report failure to frontend. */
+			mgmt_fe_send_get_cfg_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.get_data->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, NULL,
+				"No such datastore!");
+			error = true;
+			goto mgmt_txn_process_get_cfg_done;
+		}
+
+		if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
+			MGMTD_TXN_ERR(
+				"Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
+				txn_req->req.get_data->ds_id, txn,
+				(unsigned long long)txn->session_id,
+				(unsigned long long)txn_req->req_id);
+			error = true;
+		}
+
+	mgmt_txn_process_get_cfg_done:
+
+		if (error) {
+			/*
+			 * Delete the txn request.
+			 * Note: The following will remove it from the list
+			 * as well.
+			 */
+			mgmt_txn_req_free(&txn_req);
+		}
+
+		/*
+		 * Else the transaction would have been already deleted or
+		 * moved to corresponding pending list. No need to delete it.
+		 */
+		num_processed++;
+		if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
+			break;
+	}
+
+	if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
+		/* Still work left: run again on the next timer tick. */
+		MGMTD_TXN_DBG(
+			"Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
+			num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+	}
+}
+
+/*
+ * Event handler: service the queued GET_DATA requests of a transaction.
+ *
+ * Config datastores are answered locally via mgmt_txn_get_config(); GET on
+ * an operational datastore is not implemented yet and returns an error.
+ * At most MGMTD_TXN_MAX_NUM_GETDATA_PROC requests are handled per run.
+ */
+static void mgmt_txn_process_get_data(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_ds_ctx *ds_ctx;
+	int num_processed = 0;
+	bool error;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG(
+		"Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
+		(int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
+		(unsigned long long)txn->session_id);
+
+	FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
+		error = false;
+		assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+		ds_ctx = txn_req->req.get_data->ds_ctx;
+		if (!ds_ctx) {
+			/* No datastore handle: report failure to frontend. */
+			mgmt_fe_send_get_data_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.get_data->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, NULL,
+				"No such datastore!");
+			error = true;
+			goto mgmt_txn_process_get_data_done;
+		}
+
+		if (mgmt_ds_is_config(ds_ctx)) {
+			/* Config DS: serve the data from MGMTD itself. */
+			if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
+			    != 0) {
+				MGMTD_TXN_ERR(
+					"Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
+					txn_req->req.get_data->ds_id, txn,
+					(unsigned long long)txn->session_id,
+					(unsigned long long)txn_req->req_id);
+				error = true;
+			}
+		} else {
+			/*
+			 * TODO: Trigger GET procedures for Backend
+			 * For now return back error.
+			 */
+			mgmt_fe_send_get_data_reply(
+				txn->session_id, txn->txn_id,
+				txn_req->req.get_data->ds_id, txn_req->req_id,
+				MGMTD_INTERNAL_ERROR, NULL,
+				"GET-DATA on Oper DS is not supported yet!");
+			error = true;
+		}
+
+	mgmt_txn_process_get_data_done:
+
+		if (error) {
+			/*
+			 * Delete the txn request.
+			 * Note: The following will remove it from the list
+			 * as well.
+			 */
+			mgmt_txn_req_free(&txn_req);
+		}
+
+		/*
+		 * Else the transaction would have been already deleted or
+		 * moved to corresponding pending list. No need to delete it.
+		 */
+		num_processed++;
+		if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
+			break;
+	}
+
+	if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
+		/* Still work left: run again on the next timer tick. */
+		MGMTD_TXN_DBG(
+			"Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
+			num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+	}
+}
+
+/*
+ * Find the transaction owned by the given session with the given type,
+ * or NULL when no such transaction exists.
+ */
+static struct mgmt_txn_ctx *
+mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
+			       enum mgmt_txn_type type)
+{
+	struct mgmt_txn_ctx *iter;
+
+	FOREACH_TXN_IN_LIST (cm, iter) {
+		if (iter->session_id != session_id || iter->type != type)
+			continue;
+		return iter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Create (or reuse) a transaction context for a session.
+ *
+ * For CONFIG transactions only one may exist globally; if one already
+ * exists it is returned only when it belongs to the same session,
+ * otherwise NULL is returned.  New contexts are added to the global
+ * list and hash and take an initial reference via MGMTD_TXN_LOCK.
+ */
+static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
+					       enum mgmt_txn_type type)
+{
+	struct mgmt_txn_ctx *txn = NULL;
+
+	/*
+	 * For 'CONFIG' transaction check if one is already created
+	 * or not.
+	 */
+	if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
+		if (mgmt_config_txn_in_progress() == session_id)
+			txn = mgmt_txn_mm->cfg_txn;
+		goto mgmt_create_txn_done;
+	}
+
+	txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
+					      type);
+	if (!txn) {
+		txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
+		assert(txn);
+
+		txn->session_id = session_id;
+		txn->type = type;
+		mgmt_txn_badapters_init(&txn->be_adapters);
+		mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
+		mgmt_txn_reqs_init(&txn->set_cfg_reqs);
+		mgmt_txn_reqs_init(&txn->get_cfg_reqs);
+		mgmt_txn_reqs_init(&txn->get_data_reqs);
+		mgmt_txn_reqs_init(&txn->pending_get_datas);
+		txn->commit_cfg_req = NULL;
+		txn->refcount = 0;
+		/* Skip id 0 so that a valid txn-id is never zero. */
+		if (!mgmt_txn_mm->next_txn_id)
+			mgmt_txn_mm->next_txn_id++;
+		txn->txn_id = mgmt_txn_mm->next_txn_id++;
+		hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
+
+		MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
+			       mgmt_txn_type2str(type), txn);
+
+		if (type == MGMTD_TXN_TYPE_CONFIG)
+			mgmt_txn_mm->cfg_txn = txn;
+
+		MGMTD_TXN_LOCK(txn);
+	}
+
+mgmt_create_txn_done:
+	return txn;
+}
+
+/*
+ * Drop the caller's reference on a transaction; the context itself is
+ * freed (and removed from list/hash) inside unlock once the refcount
+ * reaches zero.  *txn is NULLed by the unlock.
+ */
+static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
+{
+	MGMTD_TXN_UNLOCK(txn);
+}
+
+/* Hash callback: hash the 64-bit transaction-id as 32-bit words. */
+static unsigned int mgmt_txn_hash_key(const void *data)
+{
+	const struct mgmt_txn_ctx *ctx = data;
+	uint32_t nwords = sizeof(ctx->txn_id) / sizeof(uint32_t);
+
+	return jhash2((uint32_t *)&ctx->txn_id, nwords, 0);
+}
+
+/* Hash callback: two contexts are equal iff their txn-ids match. */
+static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
+{
+	const struct mgmt_txn_ctx *a = d1;
+	const struct mgmt_txn_ctx *b = d2;
+
+	return a->txn_id == b->txn_id;
+}
+
+/* hash_clean() callback: release the table's reference on a context. */
+static void mgmt_txn_hash_free(void *data)
+{
+	struct mgmt_txn_ctx *txn = data;
+
+	mgmt_txn_delete(&txn);
+}
+
+/* Create the txn-id lookup hash (idempotent; no-op before module init). */
+static void mgmt_txn_hash_init(void)
+{
+	if (!mgmt_txn_mm)
+		return;
+	if (mgmt_txn_mm->txn_hash)
+		return;
+
+	mgmt_txn_mm->txn_hash = hash_create(
+		mgmt_txn_hash_key, mgmt_txn_hash_cmp, "MGMT Transactions");
+}
+
+/* Release every context still referenced by the hash, then the hash. */
+static void mgmt_txn_hash_destroy(void)
+{
+	struct hash *h;
+
+	if (!mgmt_txn_mm)
+		return;
+
+	h = mgmt_txn_mm->txn_hash;
+	if (!h)
+		return;
+
+	hash_clean(h, mgmt_txn_hash_free);
+	hash_free(h);
+	mgmt_txn_mm->txn_hash = NULL;
+}
+
+/*
+ * Resolve a transaction-id to its context via the lookup hash.
+ * Returns NULL if the module is uninitialized or the id is unknown.
+ */
+static inline struct mgmt_txn_ctx *
+mgmt_txn_id2ctx(uint64_t txn_id)
+{
+	struct mgmt_txn_ctx key = {0};
+
+	if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+		return NULL;
+
+	key.txn_id = txn_id;
+	return hash_lookup(mgmt_txn_mm->txn_hash, &key);
+}
+
+/* Take a reference on the transaction (called via MGMTD_TXN_LOCK). */
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
+			  int line)
+{
+	txn->refcount++;
+	MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
+		       mgmt_txn_type2str(txn->type), txn, txn->refcount);
+}
+
+/*
+ * Drop a reference on the transaction (called via MGMTD_TXN_UNLOCK).
+ *
+ * On the last reference the context is fully torn down: pending timers
+ * are cancelled, it is removed from the global hash and list, the
+ * global cfg_txn pointer is cleared if it pointed here, and the memory
+ * is freed.  *txn is always NULLed for the caller.
+ */
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+			    int line)
+{
+	assert(*txn && (*txn)->refcount);
+
+	(*txn)->refcount--;
+	MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
+		       mgmt_txn_type2str((*txn)->type), *txn,
+		       (*txn)->refcount);
+	if (!(*txn)->refcount) {
+		if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
+			if (mgmt_txn_mm->cfg_txn == *txn)
+				mgmt_txn_mm->cfg_txn = NULL;
+		/* Cancel any events still scheduled against this txn. */
+		EVENT_OFF((*txn)->proc_get_cfg);
+		EVENT_OFF((*txn)->proc_get_data);
+		EVENT_OFF((*txn)->proc_comm_cfg);
+		EVENT_OFF((*txn)->comm_cfg_timeout);
+		hash_release(mgmt_txn_mm->txn_hash, *txn);
+		mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
+
+		MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
+			       mgmt_txn_type2str((*txn)->type), *txn,
+			       (unsigned long long)(*txn)->session_id);
+
+		XFREE(MTYPE_MGMTD_TXN, *txn);
+	}
+
+	*txn = NULL;
+}
+
+/* Tear down one transaction; currently just drops the reference. */
+static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
+{
+	/* TODO: Any other cleanup applicable */
+
+	mgmt_txn_delete(txn);
+}
+
+/* Tear down every transaction the module still knows about. */
+static void
+mgmt_txn_cleanup_all_txns(void)
+{
+	struct mgmt_txn_ctx *iter;
+
+	if (!mgmt_txn_mm)
+		return;
+	if (!mgmt_txn_mm->txn_hash)
+		return;
+
+	FOREACH_TXN_IN_LIST (mgmt_txn_mm, iter)
+		mgmt_txn_cleanup_txn(&iter);
+}
+
+/* Timer callback (MGMTD_TXN_CLEANUP): tear down the scheduled txn. */
+static void mgmt_txn_cleanup(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn = EVENT_ARG(thread);
+
+	assert(txn);
+	mgmt_txn_cleanup_txn(&txn);
+}
+
+/*
+ * Schedule the handler for a transaction event on the event loop.
+ *
+ * Most events fire after MGMTD_TXN_PROC_DELAY_USEC; the commit timeout
+ * uses MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC and cleanup uses
+ * MGMTD_TXN_CLEANUP_DELAY_USEC.
+ */
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+				     enum mgmt_txn_event event)
+{
+	struct timeval tv = {.tv_sec = 0,
+			     .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
+
+	assert(mgmt_txn_mm && mgmt_txn_tm);
+
+	switch (event) {
+	case MGMTD_TXN_PROC_SETCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
+				    txn, &tv, &txn->proc_set_cfg);
+		break;
+	case MGMTD_TXN_PROC_COMMITCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
+				    txn, &tv, &txn->proc_comm_cfg);
+		break;
+	case MGMTD_TXN_PROC_GETCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
+				    txn, &tv, &txn->proc_get_cfg);
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
+				    txn, &tv, &txn->proc_get_data);
+		break;
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+		event_add_timer_msec(mgmt_txn_tm,
+				      mgmt_txn_cfg_commit_timedout, txn,
+				      MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
+				      &txn->comm_cfg_timeout);
+		break;
+	case MGMTD_TXN_CLEANUP:
+		tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
+				    &txn->clnup);
+	}
+}
+
+/*
+ * Initialise the transaction module (must be called exactly once).
+ * Stores the master structure and event loop, and sets up the global
+ * transaction list and lookup hash.  Always returns 0.
+ */
+int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
+{
+	if (mgmt_txn_mm || mgmt_txn_tm)
+		assert(!"MGMTD TXN: Call txn_init() only once");
+
+	mgmt_txn_mm = mm;
+	mgmt_txn_tm = tm;
+	mgmt_txns_init(&mm->txn_list);
+	mgmt_txn_hash_init();
+	assert(!mm->cfg_txn);
+	mm->cfg_txn = NULL;
+
+	return 0;
+}
+
+/* Shut the module down: drop all transactions, then the lookup hash. */
+void mgmt_txn_destroy(void)
+{
+	mgmt_txn_cleanup_all_txns();
+	mgmt_txn_hash_destroy();
+}
+
+/*
+ * Report which session owns the in-progress CONFIG transaction,
+ * or MGMTD_SESSION_ID_NONE when there is none.
+ */
+uint64_t mgmt_config_txn_in_progress(void)
+{
+	if (!mgmt_txn_mm || !mgmt_txn_mm->cfg_txn)
+		return MGMTD_SESSION_ID_NONE;
+
+	return mgmt_txn_mm->cfg_txn->session_id;
+}
+
+/* Create (or reuse) a transaction and return its id, or MGMTD_TXN_ID_NONE. */
+uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
+{
+	struct mgmt_txn_ctx *txn = mgmt_txn_create_new(session_id, type);
+
+	if (!txn)
+		return MGMTD_TXN_ID_NONE;
+
+	return txn->txn_id;
+}
+
+/* A transaction-id is valid iff a context is registered for it. */
+bool mgmt_txn_id_is_valid(uint64_t txn_id)
+{
+	return mgmt_txn_id2ctx(txn_id) != NULL;
+}
+
+/*
+ * Destroy the transaction identified by *txn_id and reset *txn_id to
+ * MGMTD_TXN_ID_NONE.  Unknown ids are ignored (and *txn_id unchanged).
+ */
+void mgmt_destroy_txn(uint64_t *txn_id)
+{
+	struct mgmt_txn_ctx *txn;
+
+	txn = mgmt_txn_id2ctx(*txn_id);
+	if (!txn)
+		return;
+
+	mgmt_txn_delete(&txn);
+	*txn_id = MGMTD_TXN_ID_NONE;
+}
+
+/* Type of the transaction with this id; NONE when the id is unknown. */
+enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
+{
+	struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+
+	return txn ? txn->type : MGMTD_TXN_TYPE_NONE;
+}
+
+/*
+ * Queue a SET-CONFIG request on a transaction and schedule its
+ * processing.  Each protobuf cfg-data request is translated into a
+ * northbound change (CREATE/MODIFY/DESTROY); unknown request types are
+ * skipped.  Returns -1 on unknown txn-id or when an implicit commit is
+ * requested while other SETCFG requests are already queued, 0 otherwise.
+ */
+int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+				  Mgmtd__DatastoreId ds_id,
+				  struct mgmt_ds_ctx *ds_ctx,
+				  Mgmtd__YangCfgDataReq **cfg_req,
+				  size_t num_req, bool implicit_commit,
+				  Mgmtd__DatastoreId dst_ds_id,
+				  struct mgmt_ds_ctx *dst_ds_ctx)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	size_t indx;
+	uint16_t *num_chgs;
+	struct nb_cfg_change *cfg_chg;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn)
+		return -1;
+
+	if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
+		MGMTD_TXN_ERR(
+			"For implicit commit config only one SETCFG-REQ can be allowed!");
+		return -1;
+	}
+
+	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
+	txn_req->req.set_cfg->ds_id = ds_id;
+	txn_req->req.set_cfg->ds_ctx = ds_ctx;
+	num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
+	for (indx = 0; indx < num_req; indx++) {
+		cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
+
+		if (cfg_req[indx]->req_type
+		    == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+			cfg_chg->operation = NB_OP_DESTROY;
+		else if (cfg_req[indx]->req_type
+			 == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
+			/* SET on an existing node is MODIFY, else CREATE. */
+			cfg_chg->operation =
+				mgmt_ds_find_data_node_by_xpath(
+					ds_ctx, cfg_req[indx]->data->xpath)
+					? NB_OP_MODIFY
+					: NB_OP_CREATE;
+		else
+			continue;
+
+		MGMTD_TXN_DBG(
+			"XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
+			(cfg_req[indx]->data->value
+			 && cfg_req[indx]
+				    ->data->value
+				    ->encoded_str_val
+				 ? cfg_req[indx]->data->value->encoded_str_val
+				 : "NULL"));
+		strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
+			sizeof(cfg_chg->xpath));
+		/* Value string is duplicated; freed with the change. */
+		cfg_chg->value = (cfg_req[indx]->data->value
+				  && cfg_req[indx]
+					     ->data->value
+					     ->encoded_str_val
+				  ? strdup(cfg_req[indx]
+						   ->data->value
+						   ->encoded_str_val)
+				  : NULL);
+		if (cfg_chg->value)
+			MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
+				       cfg_chg->value, cfg_chg->value);
+
+		(*num_chgs)++;
+	}
+	txn_req->req.set_cfg->implicit_commit = implicit_commit;
+	txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
+	txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
+	txn_req->req.set_cfg->setcfg_stats =
+		mgmt_fe_get_session_setcfg_stats(txn->session_id);
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+
+	return 0;
+}
+
+/*
+ * Queue a COMMIT-CONFIG request on a transaction and schedule its
+ * processing.  Only one commit may be outstanding per transaction.
+ * Returns -1 on unknown txn-id or when a commit is already in
+ * progress, 0 otherwise.
+ */
+int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+				     Mgmtd__DatastoreId src_ds_id,
+				     struct mgmt_ds_ctx *src_ds_ctx,
+				     Mgmtd__DatastoreId dst_ds_id,
+				     struct mgmt_ds_ctx *dst_ds_ctx,
+				     bool validate_only, bool abort,
+				     bool implicit)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn)
+		return -1;
+
+	if (txn->commit_cfg_req) {
+		MGMTD_TXN_ERR(
+			"A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
+			txn, (unsigned long long)txn->session_id);
+		return -1;
+	}
+
+	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
+	txn_req->req.commit_cfg.src_ds_id = src_ds_id;
+	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+	txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
+	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+	txn_req->req.commit_cfg.validate_only = validate_only;
+	txn_req->req.commit_cfg.abort = abort;
+	txn_req->req.commit_cfg.implicit = implicit;
+	txn_req->req.commit_cfg.cmt_stats =
+		mgmt_fe_get_session_commit_stats(txn->session_id);
+
+	/*
+	 * Trigger a COMMIT-CONFIG process.
+	 */
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+	return 0;
+}
+
+/*
+ * Handle backend adapter connect/disconnect.
+ *
+ * On connect: compute the config subset this backend cares about and,
+ * when non-empty, create a CONFIG transaction to download it.  On
+ * disconnect: fail any in-progress commit that involves this backend.
+ * Returns -1 only when the download transaction cannot be created.
+ */
+int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+				     bool connect)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	static struct mgmt_commit_stats dummy_stats;
+	struct nb_config_cbs *adapter_cfgs = NULL;
+
+	memset(&dummy_stats, 0, sizeof(dummy_stats));
+	if (connect) {
+		/* Get config for this single backend client */
+		mgmt_be_get_adapter_config(adapter, mm->running_ds,
+					    &adapter_cfgs);
+
+		if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
+			/* Nothing to download; mark as already in sync. */
+			SET_FLAG(adapter->flags,
+				 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+			return 0;
+		}
+
+		/*
+		 * Create a CONFIG transaction to push the config changes
+		 * provided to the backend client.
+		 */
+		txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+		if (!txn) {
+			MGMTD_TXN_ERR(
+				"Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
+				adapter->name);
+			return -1;
+		}
+
+		MGMTD_TXN_DBG("Created initial txn %" PRIu64
+			       " for BE connection %s",
+			       txn->txn_id, adapter->name);
+		/*
+		 * Set the changeset for transaction to commit and trigger the
+		 * commit request.
+		 */
+		txn_req =
+			mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+		txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
+		txn_req->req.commit_cfg.src_ds_ctx = 0;
+		txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
+		txn_req->req.commit_cfg.dst_ds_ctx = 0;
+		txn_req->req.commit_cfg.validate_only = false;
+		txn_req->req.commit_cfg.abort = false;
+		txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+		txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;
+
+		/*
+		 * Trigger a COMMIT-CONFIG process.
+		 */
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+	} else {
+		/*
+		 * Check if any transaction is currently on-going that
+		 * involves this backend client. If so, report the transaction
+		 * has failed.
+		 */
+		FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+			if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
+				cmtcfg_req = txn->commit_cfg_req
+						     ? &txn->commit_cfg_req
+								->req.commit_cfg
+						     : NULL;
+				if (cmtcfg_req
+				    && cmtcfg_req->subscr_info
+					       .xpath_subscr[adapter->id]
+					       .subscribed) {
+					mgmt_txn_send_commit_cfg_reply(
+						txn, MGMTD_INTERNAL_ERROR,
+						"Backend daemon disconnected while processing commit!");
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a backend's reply to TXN_CREATE / TXN_DELETE.
+ *
+ * On a successful create, immediately push the CFGDATA_CREATE requests
+ * to that backend; on failure, fail the whole commit.  Returns -1 when
+ * the txn-id is unknown or not a CONFIG transaction.
+ */
+int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
+				  bool success,
+				  struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+		return -1;
+
+	if (!create && !txn->commit_cfg_req)
+		return 0;
+
+	assert(txn->commit_cfg_req);
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	if (create) {
+		if (success) {
+			/*
+			 * Done with TXN_CREATE. Move the backend client to
+			 * next phase.
+			 */
+			assert(cmtcfg_req->curr_phase
+			       == MGMTD_COMMIT_PHASE_TXN_CREATE);
+
+			/*
+			 * Send CFGDATA_CREATE-REQs to the backend immediately.
+			 */
+			mgmt_txn_send_be_cfg_data(txn, adapter);
+		} else {
+			mgmt_txn_send_commit_cfg_reply(
+				txn, MGMTD_INTERNAL_ERROR,
+				"Internal error! Failed to initiate transaction at backend!");
+		}
+	} else {
+		/*
+		 * Done with TXN_DELETE. Move the backend client to next phase.
+		 */
+		/*
+		 * NOTE(review): phase-move on TXN_DELETE is deliberately
+		 * disabled here ('if (false)') — confirm whether this is a
+		 * leftover or intentional before removing the dead branch.
+		 */
+		if (false)
+			mgmt_move_be_commit_to_next_phase(txn, adapter);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a backend's reply to a CFGDATA_CREATE request for one batch.
+ *
+ * On success the batch advances to the APPLY_CFG phase; on failure the
+ * whole commit is failed.  Returns -1 on unknown txn-id/batch-id or
+ * when no commit is in progress.
+ */
+int mgmt_txn_notify_be_cfgdata_reply(
+	uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
+	struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+		return -1;
+
+	if (!txn->commit_cfg_req)
+		return -1;
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+	/* Reject batch-ids that don't resolve or belong to another txn. */
+	cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
+	if (!cfg_btch || cfg_btch->txn != txn)
+		return -1;
+
+	if (!success) {
+		MGMTD_TXN_ERR(
+			"CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
+			adapter->name, txn, cfg_btch,
+			error_if_any ? error_if_any : "None");
+		mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INTERNAL_ERROR,
+			error_if_any ? error_if_any :
+			"Internal error! Failed to download config data to backend!");
+		return 0;
+	}
+
+	MGMTD_TXN_DBG(
+		"CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
+		adapter->name, txn, cfg_btch,
+		error_if_any ? error_if_any : "None");
+	mgmt_move_txn_cfg_batch_to_next(
+		cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
+		&cmtcfg_req->next_batches[adapter->id], true,
+		MGMTD_COMMIT_PHASE_APPLY_CFG);
+
+	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+	return 0;
+}
+
+/*
+ * Handle a backend's reply to a CFGDATA_APPLY request covering one or
+ * more batches.
+ *
+ * On success each batch advances to the TXN_DELETE phase; once no
+ * batches remain for this backend a TXN_DELETE is sent to wrap it up.
+ * On failure the whole commit is failed.  Returns -1 on unknown
+ * txn-id, missing commit, or an unknown/foreign batch-id.
+ */
+int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+					uint64_t batch_ids[],
+					size_t num_batch_ids, char *error_if_any,
+					struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_be_cfg_batch *cfg_btch;
+	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+	size_t indx;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
+	    || !txn->commit_cfg_req)
+		return -1;
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+	if (!success) {
+		MGMTD_TXN_ERR(
+			"CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
+			adapter->name, txn, (unsigned long long)batch_ids[0],
+			(unsigned long long)batch_ids[num_batch_ids - 1],
+			error_if_any ? error_if_any : "None");
+		mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INTERNAL_ERROR,
+			error_if_any ? error_if_any :
+			"Internal error! Failed to apply config data on backend!");
+		return 0;
+	}
+
+	for (indx = 0; indx < num_batch_ids; indx++) {
+		cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
+		/*
+		 * Guard against an unknown batch-id from the backend as
+		 * well as one belonging to another transaction (the
+		 * CFGDATA reply path above does the same).
+		 */
+		if (!cfg_btch || cfg_btch->txn != txn)
+			return -1;
+		mgmt_move_txn_cfg_batch_to_next(
+			cmtcfg_req, cfg_btch,
+			&cmtcfg_req->curr_batches[adapter->id],
+			&cmtcfg_req->next_batches[adapter->id], true,
+			MGMTD_COMMIT_PHASE_TXN_DELETE);
+	}
+
+	if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
+		/*
+		 * All configuration for the specific backend has been applied.
+		 * Send TXN-DELETE to wrap up the transaction for this backend.
+		 */
+		SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+		mgmt_txn_send_be_txn_delete(txn, adapter);
+	}
+
+	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+	if (mm->perf_stats_en)
+		gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
+
+	return 0;
+}
+
+/*
+ * Forward a commit result to the frontend for the given transaction.
+ * Returns -1 on unknown txn-id or when no commit is in progress.
+ */
+int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
+				       enum mgmt_result result,
+				       const char *error_if_any)
+{
+	struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+
+	if (!txn)
+		return -1;
+
+	if (!txn->commit_cfg_req) {
+		MGMTD_TXN_ERR(
+			"NO commit in-progress for Txn %p, session 0x%llx!",
+			txn, (unsigned long long)txn->session_id);
+		return -1;
+	}
+
+	return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
+}
+
+/*
+ * Queue a GET-CONFIG request on a transaction and schedule processing.
+ * Xpaths beyond MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH are silently
+ * dropped by the copy loop.  Returns -1 on unknown txn-id.
+ */
+int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
+				  Mgmtd__DatastoreId ds_id,
+				  struct mgmt_ds_ctx *ds_ctx,
+				  Mgmtd__YangGetDataReq **data_req,
+				  size_t num_reqs)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	size_t indx;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn)
+		return -1;
+
+	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
+	txn_req->req.get_data->ds_id = ds_id;
+	txn_req->req.get_data->ds_ctx = ds_ctx;
+	for (indx = 0;
+	     indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
+	     indx++) {
+		MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
+		/* Xpath strings are duplicated; freed with the request. */
+		txn_req->req.get_data->xpaths[indx] =
+			strdup(data_req[indx]->data->xpath);
+		txn_req->req.get_data->num_xpaths++;
+	}
+
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+
+	return 0;
+}
+
+/*
+ * Queue a GET-DATA request on a transaction and schedule processing.
+ * Mirrors mgmt_txn_send_get_config_req but drives the GETDATA event.
+ * Returns -1 on unknown txn-id.
+ */
+int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
+				Mgmtd__DatastoreId ds_id,
+				struct mgmt_ds_ctx *ds_ctx,
+				Mgmtd__YangGetDataReq **data_req,
+				size_t num_reqs)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	size_t indx;
+
+	txn = mgmt_txn_id2ctx(txn_id);
+	if (!txn)
+		return -1;
+
+	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
+	txn_req->req.get_data->ds_id = ds_id;
+	txn_req->req.get_data->ds_ctx = ds_ctx;
+	for (indx = 0;
+	     indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
+	     indx++) {
+		MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
+		/* Xpath strings are duplicated; freed with the request. */
+		txn_req->req.get_data->xpaths[indx] =
+			strdup(data_req[indx]->data->xpath);
+		txn_req->req.get_data->num_xpaths++;
+	}
+
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+
+	return 0;
+}
+
+/* Dump all known transactions (id, session, type, refcount) to the vty. */
+void mgmt_txn_status_write(struct vty *vty)
+{
+	struct mgmt_txn_ctx *txn;
+
+	vty_out(vty, "MGMTD Transactions\n");
+
+	FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+		vty_out(vty, "  Txn: \t\t\t%p\n", txn);
+		vty_out(vty, "    Txn-Id: \t\t\t%llu\n",
+			(unsigned long long)txn->txn_id);
+		vty_out(vty, "    Session-Id: \t\t%llu\n",
+			(unsigned long long)txn->session_id);
+		vty_out(vty, "    Type: \t\t\t%s\n",
+			mgmt_txn_type2str(txn->type));
+		vty_out(vty, "    Ref-Count: \t\t\t%d\n", txn->refcount);
+	}
+	vty_out(vty, "  Total: %d\n",
+		(int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
+}
+
+/*
+ * Kick off a commit that applies a rollback: diff the two datastores
+ * and push the resulting changeset through a fresh CONFIG transaction.
+ * Returns -1 when there is nothing to commit or the transaction cannot
+ * be created, 0 otherwise.
+ *
+ * NOTE(review): 'changes' and 'dummy_stats' are function-static and
+ * memset on every call — presumably safe because MGMTD runs these from
+ * a single event loop; confirm before reusing from another thread.
+ */
+int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+					 struct mgmt_ds_ctx *dst_ds_ctx)
+{
+	static struct nb_config_cbs changes;
+	struct nb_config_cbs *cfg_chgs = NULL;
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	static struct mgmt_commit_stats dummy_stats;
+
+	memset(&changes, 0, sizeof(changes));
+	memset(&dummy_stats, 0, sizeof(dummy_stats));
+	/*
+	 * This could be the case when the config is directly
+	 * loaded onto the candidate DS from a file. Get the
+	 * diff from a full comparison of the candidate and
+	 * running DSs.
+	 */
+	nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
+		       mgmt_ds_get_nb_config(src_ds_ctx), &changes);
+	cfg_chgs = &changes;
+
+	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+		/*
+		 * This means there's no changes to commit whatsoever
+		 * is the source of the changes in config.
+		 */
+		return -1;
+	}
+
+	/*
+	 * Create a CONFIG transaction to push the config changes
+	 * provided to the backend client.
+	 */
+	txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+	if (!txn) {
+		MGMTD_TXN_ERR(
+			"Failed to create CONFIG Transaction for downloading CONFIGs");
+		return -1;
+	}
+
+	MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);
+
+	/*
+	 * Set the changeset for transaction to commit and trigger the commit
+	 * request.
+	 */
+	txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+	txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
+	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+	txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
+	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+	txn_req->req.commit_cfg.validate_only = false;
+	txn_req->req.commit_cfg.abort = false;
+	txn_req->req.commit_cfg.rollback = true;
+	txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+	txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;
+
+	/*
+	 * Trigger a COMMIT-CONFIG process.
+	 */
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+	return 0;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_TXN_H_
+#define _FRR_MGMTD_TXN_H_
+
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_TXN_PROC_DELAY_MSEC 5
+#define MGMTD_TXN_PROC_DELAY_USEC 10
+#define MGMTD_TXN_MAX_NUM_SETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETDATA_PROC 128
+
+#define MGMTD_TXN_SEND_CFGVALIDATE_DELAY_MSEC 100
+#define MGMTD_TXN_SEND_CFGAPPLY_DELAY_MSEC 100
+#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC 30000 /* 30 seconds */
+
+#define MGMTD_TXN_CLEANUP_DELAY_MSEC 100
+#define MGMTD_TXN_CLEANUP_DELAY_USEC 10
+
+/*
+ * The following definition enables local validation of config
+ * on the MGMTD process by loading client-defined NB callbacks
+ * and calling them locally before sending CNFG_APPLY_REQ to
+ * backend for actual apply of configuration on internal state
+ * of the backend application.
+ *
+ * #define MGMTD_LOCAL_VALIDATIONS_ENABLED
+ *
+ * Note: Enabled by default in configure.ac, if this needs to be
+ * disabled then pass --enable-mgmtd-local-validations=no to
+ * the list of arguments passed to ./configure
+ */
+
+PREDECL_LIST(mgmt_txns);
+
+struct mgmt_master;
+
+/* Kinds of transactions handled by the MGMTD transaction module. */
+enum mgmt_txn_type {
+	MGMTD_TXN_TYPE_NONE = 0,
+	MGMTD_TXN_TYPE_CONFIG,
+	MGMTD_TXN_TYPE_SHOW
+};
+
+/* Human-readable name for a transaction type. */
+static inline const char *mgmt_txn_type2str(enum mgmt_txn_type type)
+{
+	switch (type) {
+	case MGMTD_TXN_TYPE_CONFIG:
+		return "CONFIG";
+	case MGMTD_TXN_TYPE_SHOW:
+		return "SHOW";
+	case MGMTD_TXN_TYPE_NONE:
+		return "None";
+	}
+
+	return "Unknown";
+}
+
+/* Initialise transaction module. */
+extern int mgmt_txn_init(struct mgmt_master *cm, struct event_loop *tm);
+
+/* Destroy the transaction module. */
+extern void mgmt_txn_destroy(void);
+
+/*
+ * Check if transaction is in progress.
+ *
+ * Returns:
+ * session ID if in-progress, MGMTD_SESSION_ID_NONE otherwise.
+ */
+extern uint64_t mgmt_config_txn_in_progress(void);
+
+/*
+ * Create transaction.
+ *
+ * session_id
+ * Session ID.
+ *
+ * type
+ * Transaction type (CONFIG/SHOW/NONE)
+ *
+ * Returns:
+ * transaction ID.
+ */
+extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
+
+/*
+ * Destroy transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ */
+extern void mgmt_destroy_txn(uint64_t *txn_id);
+
+/*
+ * Check if transaction is valid given an ID.
+ */
+extern bool mgmt_txn_id_is_valid(uint64_t txn_id);
+
+/*
+ * Returns the type of transaction given an ID.
+ */
+extern enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id);
+
+/*
+ * Send set-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * ds_hndl
+ * Datastore handle.
+ *
+ * cfg_req
+ * Config requests.
+ *
+ * num_req
+ * Number of config requests.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * dst_ds_id
+ * Destination datastore ID.
+ *
+ * dst_ds_handle
+ * Destination datastore handle.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangCfgDataReq **cfg_req,
+ size_t num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+
+/*
+ * Send commit-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * src_ds_id
+ * Source datastore ID.
+ *
+ * src_ds_hndl
+ * Source Datastore handle.
+ *
+ * validate_only
+ * TRUE if commit request needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ * TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
+ *
+ * implicit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+/*
+ * NOTE: parameter names fixed to match the definition in mgmt_txn.c —
+ * the declaration previously swapped src_ds_ctx/dst_ds_ctx, which was
+ * misleading to callers (prototype names don't affect codegen, but
+ * they document the argument order).
+ */
+extern int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+					   Mgmtd__DatastoreId src_ds_id,
+					   struct mgmt_ds_ctx *src_ds_ctx,
+					   Mgmtd__DatastoreId dst_ds_id,
+					   struct mgmt_ds_ctx *dst_ds_ctx,
+					   bool validate_only, bool abort,
+					   bool implicit);
+
+extern int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
+ enum mgmt_result result,
+ const char *error_if_any);
+
+/*
+ * Send get-config request to be processed later in transaction.
+ *
+ * Similar to set-config request.
+ */
+extern int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangGetDataReq **data_req,
+ size_t num_reqs);
+
+/*
+ * Send get-data request to be processed later in transaction.
+ *
+ * Similar to get-config request, but here data is fetched from backend client.
+ */
+extern int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangGetDataReq **data_req,
+ size_t num_reqs);
+
+/*
+ * Notify backend adapter on connection.
+ */
+extern int
+mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+ bool connect);
+
+/*
+ * Reply to backend adapter about transaction create/delete.
+ */
+extern int
+mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Reply to backend adapter with config data create request.
+ */
+extern int
+mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, uint64_t batch_id,
+ bool success, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Reply to backend adapter with config data validate request.
+ */
+extern int mgmt_txn_notify_be_cfg_validate_reply(
+ uint64_t txn_id, bool success, uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Reply to backend adapter with config data apply request.
+ */
+extern int
+mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+ uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Dump transaction status to vty.
+ */
+extern void mgmt_txn_status_write(struct vty *vty);
+
+/*
+ * Trigger rollback config apply.
+ *
+ * Creates a new transaction and commit request for rollback.
+ */
+extern int
+mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+#endif /* _FRR_MGMTD_TXN_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD VTY Interface
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "command.h"
+#include "json.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+
+DEFPY(show_mgmt_be_adapter,
+ show_mgmt_be_adapter_cmd,
+ "show mgmt backend-adapter all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_BE_ADAPTER_STR
+ "Display all Backend Adapters\n")
+{
+ mgmt_be_adapter_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_be_xpath_reg,
+ show_mgmt_be_xpath_reg_cmd,
+ "show mgmt backend-yang-xpath-registry",
+ SHOW_STR
+ MGMTD_STR
+ "Backend Adapter YANG Xpath Registry\n")
+{
+ mgmt_be_xpath_register_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter, show_mgmt_fe_adapter_cmd,
+ "show mgmt frontend-adapter all [detail$detail]",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_FE_ADAPTER_STR
+ "Display all Frontend Adapters\n"
+ "Display more details\n")
+{
+ mgmt_fe_adapter_status_write(vty, !!detail);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+ mgmt_performance_measurement_cmd,
+ "[no] mgmt performance-measurement",
+ NO_STR
+ MGMTD_STR
+ "Enable performance measurement\n")
+{
+ if (no)
+ mgmt_fe_adapter_perf_measurement(vty, false);
+ else
+ mgmt_fe_adapter_perf_measurement(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_reset_performance_stats,
+ mgmt_reset_performance_stats_cmd,
+ "mgmt reset-statistics",
+ MGMTD_STR
+ "Reset the Performance measurement statistics\n")
+{
+ mgmt_fe_adapter_reset_perf_stats(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_txn,
+ show_mgmt_txn_cmd,
+ "show mgmt transaction all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_TXN_STR
+ "Display all Transactions\n")
+{
+ mgmt_txn_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_ds,
+ show_mgmt_ds_cmd,
+ "show mgmt datastore [all|candidate|operational|running]$dsname",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_DS_STR
+ "All datastores (default)\n"
+ "Candidate datastore\n"
+ "Operational datastore\n"
+ "Running datastore\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+
+ if (!dsname || dsname[0] == 'a') {
+ mgmt_ds_status_write(vty);
+ return CMD_SUCCESS;
+ }
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+ mgmt_ds_status_write_one(vty, ds_ctx);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_commit,
+ mgmt_commit_cmd,
+ "mgmt commit <check|apply|abort>$type",
+ MGMTD_STR
+ "Commit action\n"
+ "Validate the set of config commands\n"
+ "Validate and apply the set of config commands\n"
+ "Abort and drop the set of config commands recently added\n")
+{
+ bool validate_only = type[0] == 'c';
+ bool abort = type[1] == 'b';
+
+ if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+ return CMD_WARNING_CONFIG_FAILED;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+ "mgmt set-config WORD$path VALUE",
+ MGMTD_STR
+ "Set configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to set\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_CREATE;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+ "mgmt delete-config WORD$path",
+ MGMTD_STR
+ "Delete configuration data\n"
+ "XPath expression specifying the YANG data path\n")
+{
+
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = NULL;
+ vty->cfg_changes[0].operation = NB_OP_DESTROY;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+ "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get configuration data from a specific configuration datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+ "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get data from a specific datastore\n"
+ "Candidate datastore\n"
+ "Operational datastore (default)\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_dump_data,
+ show_mgmt_dump_data_cmd,
+ "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+ SHOW_STR
+ MGMTD_STR
+ "Get Datastore contents from a specific datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n"
+ "XPath string\n"
+ "Dump the contents to a file\n"
+ "Full path of the file\n"
+ "json output\n"
+ "xml output\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+ LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
+ FILE *f = NULL;
+
+ /* Map the optional datastore token only when the user supplied
+  * one (matches show_mgmt_get_config/get_data); testing the
+  * 'datastore' local here would always be true and pass a NULL
+  * 'dsname' to mgmt_ds_name2id().
+  */
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ /* Dump goes to the given file when one is named, else to the vty. */
+ if (filepath) {
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty,
+ "Could not open file pointed by filepath %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);
+
+ if (f)
+ fclose(f);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_map_xpath,
+ show_mgmt_map_xpath_cmd,
+ "show mgmt yang-xpath-subscription WORD$path",
+ SHOW_STR
+ MGMTD_STR
+ "Get YANG Backend Subscription\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ mgmt_be_xpath_subscr_info_write(vty, path);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_load_config,
+ mgmt_load_config_cmd,
+ "mgmt load-config WORD$filepath <merge|replace>$type",
+ MGMTD_STR
+ "Load configuration onto Candidate Datastore\n"
+ "Full path of the file\n"
+ "Merge configuration with contents of Candidate Datastore\n"
+ "Replace the existing contents of Candidate datastore\n")
+{
+ /* 'merge' vs 'replace' is keyed off the token's first letter;
+  * the comparison is already a bool, no ternary needed.
+  */
+ bool merge = type[0] == 'm';
+ struct mgmt_ds_ctx *ds_ctx;
+ int ret;
+
+ if (access(filepath, F_OK) == -1) {
+ vty_out(vty, "ERROR: File %s : %s\n", filepath,
+ strerror(errno));
+ return CMD_ERR_NO_FILE;
+ }
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+ if (ret != 0)
+ vty_out(vty, "Error with parsing the file with error code %d\n",
+ ret);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_save_config,
+ mgmt_save_config_cmd,
+ "mgmt save-config <candidate|running>$dsname WORD$filepath",
+ MGMTD_STR
+ "Save configuration from datastore\n"
+ "Candidate datastore\n"
+ "Running datastore\n"
+ "Full path of the file\n")
+{
+ Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
+ struct mgmt_ds_ctx *ds_ctx;
+ FILE *f;
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
+ dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+
+ if (!filepath) {
+ vty_out(vty, "ERROR: No file path mentioned!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty, "Could not open file pointed by filepath %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);
+
+ fclose(f);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_cmt_hist,
+ show_mgmt_cmt_hist_cmd,
+ "show mgmt commit-history",
+ SHOW_STR
+ MGMTD_STR
+ "Show commit history\n")
+{
+ show_mgmt_cmt_history(vty);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rollback,
+ mgmt_rollback_cmd,
+ "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+ MGMTD_STR
+ "Rollback commits\n"
+ "Rollback to commit ID\n"
+ "Commit-ID\n"
+ "Rollback n commits\n"
+ "Number of commits\n")
+{
+ /* Roll back either to an explicit commit-id or the last N commits. */
+ if (commit)
+ mgmt_history_rollback_by_id(vty, commit);
+ else
+ mgmt_history_rollback_n(vty, last);
+
+ return CMD_SUCCESS;
+}
+
+static int config_write_mgmt_debug(struct vty *vty);
+static struct cmd_node debug_node = {
+ .name = "debug",
+ .node = DEBUG_NODE,
+ .prompt = "",
+ .config_write = config_write_mgmt_debug,
+};
+
+static int config_write_mgmt_debug(struct vty *vty)
+{
+ int n = mgmt_debug_be + mgmt_debug_fe + mgmt_debug_ds + mgmt_debug_txn;
+ if (!n)
+ return 0;
+ if (n == 4) {
+ vty_out(vty, "debug mgmt all\n");
+ return 0;
+ }
+
+ vty_out(vty, "debug mgmt");
+ if (mgmt_debug_be)
+ vty_out(vty, " backend");
+ if (mgmt_debug_ds)
+ vty_out(vty, " datastore");
+ if (mgmt_debug_fe)
+ vty_out(vty, " frontend");
+ if (mgmt_debug_txn)
+ vty_out(vty, " transaction");
+
+ vty_out(vty, "\n");
+
+ return 0;
+}
+
+DEFPY(debug_mgmt,
+ debug_mgmt_cmd,
+ "[no$no] debug mgmt <all$all|{backend$be|datastore$ds|frontend$fe|transaction$txn}>",
+ NO_STR
+ DEBUG_STR
+ MGMTD_STR
+ "All debug\n"
+ "Back-end debug\n"
+ "Datastore debug\n"
+ "Front-end debug\n"
+ "Transaction debug\n")
+{
+ bool set = !no;
+ if (all)
+ be = fe = ds = txn = set ? all : NULL;
+
+ if (be)
+ mgmt_debug_be = set;
+ if (ds)
+ mgmt_debug_ds = set;
+ if (fe)
+ mgmt_debug_fe = set;
+ if (txn)
+ mgmt_debug_txn = set;
+
+ return CMD_SUCCESS;
+}
+
+void mgmt_vty_init(void)
+{
+ /*
+ * Initialize command handling from VTYSH connection.
+ * Call command initialization routines defined by
+ * backend components that are moved to new MGMTD infra
+ * here one by one.
+ */
+#if HAVE_STATICD
+ extern void static_vty_init(void);
+ static_vty_init();
+#endif
+
+ install_node(&debug_node);
+
+ install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
+ install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_txn_cmd);
+ install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
+ install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);
+
+ install_element(CONFIG_NODE, &mgmt_commit_cmd);
+ install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_load_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_save_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_rollback_cmd);
+
+ install_element(VIEW_NODE, &debug_mgmt_cmd);
+ install_element(CONFIG_NODE, &debug_mgmt_cmd);
+
+ /* Enable view */
+ install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
+ install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);
+
+ /*
+ * TODO: Register and handlers for auto-completion here.
+ */
+}
--- /dev/null
+/*
+ * MGMTD VTY Interface
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "command.h"
+#include "json.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+
+DEFPY(show_mgmt_be_adapter,
+ show_mgmt_be_adapter_cmd,
+ "show mgmt backend-adapter all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_BE_ADAPTER_STR
+ "Display all Backend Adapters\n")
+{
+ mgmt_be_adapter_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_be_xpath_reg,
+ show_mgmt_be_xpath_reg_cmd,
+ "show mgmt backend-yang-xpath-registry",
+ SHOW_STR
+ MGMTD_STR
+ "Backend Adapter YANG Xpath Registry\n")
+{
+ mgmt_be_xpath_register_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter,
+ show_mgmt_fe_adapter_cmd,
+ "show mgmt frontend-adapter all",
+ SHOW_STR MGMTD_STR MGMTD_FE_ADAPTER_STR "Display all Frontend Adapters\n")
+{
+ mgmt_fe_adapter_status_write(vty, false);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter_detail, show_mgmt_fe_adapter_detail_cmd,
+ "show mgmt frontend-adapter all detail",
+ SHOW_STR MGMTD_STR MGMTD_FE_ADAPTER_STR
+ "Display all Frontend Adapters\n"
+ "Details of commit stats\n")
+{
+ mgmt_fe_adapter_status_write(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+ mgmt_performance_measurement_cmd,
+ "[no] mgmt performance-measurement",
+ NO_STR
+ MGMTD_STR
+ "Enable performance measurement\n")
+{
+ if (no)
+ mgmt_fe_adapter_perf_measurement(vty, false);
+ else
+ mgmt_fe_adapter_perf_measurement(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_reset_performance_stats,
+ mgmt_reset_performance_stats_cmd,
+ "mgmt reset-statistics",
+ MGMTD_STR
+ "Reset the Performance measurement statistics\n")
+{
+ mgmt_fe_adapter_reset_perf_stats(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_txn,
+ show_mgmt_txn_cmd,
+ "show mgmt transaction all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_TXN_STR
+ "Display all Transactions\n")
+{
+ mgmt_txn_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_ds,
+ show_mgmt_ds_cmd,
+ "show mgmt datastore [all|candidate|operational|running]$dsname",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_DS_STR
+ "All datastores (default)\n"
+ "Candidate datastore\n"
+ "Operational datastore\n"
+ "Running datastore\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+
+ if (!dsname || dsname[0] == 'a') {
+ mgmt_ds_status_write(vty);
+ return CMD_SUCCESS;
+ }
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+ mgmt_ds_status_write_one(vty, ds_ctx);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_commit,
+ mgmt_commit_cmd,
+ "mgmt commit <check|apply|abort>$type",
+ MGMTD_STR
+ "Commit action\n"
+ "Validate the set of config commands\n"
+ "Validate and apply the set of config commands\n"
+ "Abort and drop the set of config commands recently added\n")
+{
+ bool validate_only = type[0] == 'c';
+ bool abort = type[1] == 'b';
+
+ if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+ return CMD_WARNING_CONFIG_FAILED;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+ "mgmt set-config WORD$path VALUE",
+ MGMTD_STR
+ "Set configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to set\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_CREATE;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+ "mgmt delete-config WORD$path",
+ MGMTD_STR
+ "Delete configuration data\n"
+ "XPath expression specifying the YANG data path\n")
+{
+
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = NULL;
+ vty->cfg_changes[0].operation = NB_OP_DESTROY;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+ "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get configuration data from a specific configuration datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+ "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get data from a specific datastore\n"
+ "Candidate datastore\n"
+ "Operational datastore (default)\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_dump_data,
+ show_mgmt_dump_data_cmd,
+ "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+ SHOW_STR
+ MGMTD_STR
+ "Get Datastore contents from a specific datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n"
+ "XPath string\n"
+ "Dump the contents to a file\n"
+ "Full path of the file\n"
+ "json output\n"
+ "xml output\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+ LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
+ FILE *f = NULL;
+
+ /* Map the optional datastore token only when the user supplied
+  * one (matches show_mgmt_get_config/get_data); testing the
+  * 'datastore' local here would always be true and pass a NULL
+  * 'dsname' to mgmt_ds_name2id().
+  */
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ /* Dump goes to the given file when one is named, else to the vty. */
+ if (filepath) {
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty,
+ "Could not open file pointed by filepath %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);
+
+ if (f)
+ fclose(f);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_map_xpath,
+ show_mgmt_map_xpath_cmd,
+ "show mgmt yang-xpath-subscription WORD$path",
+ SHOW_STR
+ MGMTD_STR
+ "Get YANG Backend Subscription\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ mgmt_be_xpath_subscr_info_write(vty, path);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_load_config,
+ mgmt_load_config_cmd,
+ "mgmt load-config WORD$filepath <merge|replace>$type",
+ MGMTD_STR
+ "Load configuration onto Candidate Datastore\n"
+ "Full path of the file\n"
+ "Merge configuration with contents of Candidate Datastore\n"
+ "Replace the existing contents of Candidate datastore\n")
+{
+ /* 'merge' vs 'replace' is keyed off the token's first letter;
+  * the comparison is already a bool, no ternary needed.
+  */
+ bool merge = type[0] == 'm';
+ struct mgmt_ds_ctx *ds_ctx;
+ int ret;
+
+ if (access(filepath, F_OK) == -1) {
+ vty_out(vty, "ERROR: File %s : %s\n", filepath,
+ strerror(errno));
+ return CMD_ERR_NO_FILE;
+ }
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+ if (ret != 0)
+ vty_out(vty, "Error with parsing the file with error code %d\n",
+ ret);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_save_config,
+ mgmt_save_config_cmd,
+ "mgmt save-config <candidate|running>$dsname WORD$filepath",
+ MGMTD_STR
+ "Save configuration from datastore\n"
+ "Candidate datastore\n"
+ "Running datastore\n"
+ "Full path of the file\n")
+{
+ Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
+ struct mgmt_ds_ctx *ds_ctx;
+ FILE *f;
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
+ dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+
+ if (!filepath) {
+ vty_out(vty, "ERROR: No file path mentioned!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty, "Could not open file pointed by filepath %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);
+
+ fclose(f);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_cmt_hist,
+ show_mgmt_cmt_hist_cmd,
+ "show mgmt commit-history",
+ SHOW_STR
+ MGMTD_STR
+ "Show commit history\n")
+{
+ show_mgmt_cmt_history(vty);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rollback,
+ mgmt_rollback_cmd,
+ "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+ MGMTD_STR
+ "Rollback commits\n"
+ "Rollback to commit ID\n"
+ "Commit-ID\n"
+ "Rollback n commits\n"
+ "Number of commits\n")
+{
+ /* Roll back either to an explicit commit-id or the last N commits. */
+ if (commit)
+ mgmt_history_rollback_by_id(vty, commit);
+ else
+ mgmt_history_rollback_n(vty, last);
+
+ return CMD_SUCCESS;
+}
+
+static int config_write_mgmt_debug(struct vty *vty);
+static struct cmd_node debug_node = {
+ .name = "debug",
+ .node = DEBUG_NODE,
+ .prompt = "",
+ .config_write = config_write_mgmt_debug,
+};
+
+static int config_write_mgmt_debug(struct vty *vty)
+{
+ int n = mgmt_debug_be + mgmt_debug_fe + mgmt_debug_ds + mgmt_debug_txn;
+ if (!n)
+ return 0;
+ if (n == 4) {
+ vty_out(vty, "debug mgmt all\n");
+ return 0;
+ }
+
+ vty_out(vty, "debug mgmt");
+ if (mgmt_debug_be)
+ vty_out(vty, " backend");
+ if (mgmt_debug_ds)
+ vty_out(vty, " datastore");
+ if (mgmt_debug_fe)
+ vty_out(vty, " frontend");
+ if (mgmt_debug_txn)
+ vty_out(vty, " transaction");
+
+ vty_out(vty, "\n");
+
+ return 0;
+}
+
+DEFPY(debug_mgmt,
+ debug_mgmt_cmd,
+ "[no$no] debug mgmt <all$all|{backend$be|datastore$ds|frontend$fe|transaction$txn}>",
+ NO_STR
+ DEBUG_STR
+ MGMTD_STR
+ "All debug\n"
+ "Back-end debug\n"
+ "Datastore debug\n"
+ "Front-end debug\n"
+ "Transaction debug\n")
+{
+ bool set = !no;
+ if (all)
+ be = fe = ds = txn = set ? all : NULL;
+
+ if (be)
+ mgmt_debug_be = set;
+ if (ds)
+ mgmt_debug_ds = set;
+ if (fe)
+ mgmt_debug_fe = set;
+ if (txn)
+ mgmt_debug_txn = set;
+
+ return CMD_SUCCESS;
+}
+
+void mgmt_vty_init(void)
+{
+ /*
+ * Initialize command handling from VTYSH connection.
+ * Call command initialization routines defined by
+ * backend components that are moved to new MGMTD infra
+ * here one by one.
+ */
+#if HAVE_STATICD
+ extern void static_vty_init(void);
+ static_vty_init();
+#endif
+
+ install_node(&debug_node);
+
+ install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
+ install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_fe_adapter_detail_cmd);
+ install_element(VIEW_NODE, &show_mgmt_txn_cmd);
+ install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
+ install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);
+
+ install_element(CONFIG_NODE, &mgmt_commit_cmd);
+ install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_load_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_save_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_rollback_cmd);
+
+ install_element(VIEW_NODE, &debug_mgmt_cmd);
+ install_element(CONFIG_NODE, &debug_mgmt_cmd);
+
+ /* Enable view */
+ install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
+ install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);
+
+ /*
+ * TODO: Register and handlers for auto-completion here.
+ */
+}
--- /dev/null
+#
+# mgmtd -- Management Daemon
+#
+
+# dist_examples_DATA += \
+ # end
+
+vtysh_daemons += mgmtd
+
+# man8 += $(MANBUILD)/frr-mgmtd.8
+# endif
+
+clippy_scan += \
+ mgmtd/mgmt_vty.c \
+ # end
+
+lib_LTLIBRARIES += mgmtd/libmgmt_be_nb.la
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES = \
+ # end
+mgmtd_libmgmt_be_nb_la_CFLAGS = $(AM_CFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_CPPFLAGS = $(AM_CPPFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_LDFLAGS = -version-info 0:0:0
+
+noinst_LIBRARIES += mgmtd/libmgmtd.a
+mgmtd_libmgmtd_a_SOURCES = \
+ mgmtd/mgmt.c \
+ mgmtd/mgmt_ds.c \
+ mgmtd/mgmt_be_server.c \
+ mgmtd/mgmt_be_adapter.c \
+ mgmtd/mgmt_fe_server.c \
+ mgmtd/mgmt_fe_adapter.c \
+ mgmtd/mgmt_history.c \
+ mgmtd/mgmt_memory.c \
+ mgmtd/mgmt_txn.c \
+ mgmtd/mgmt_vty.c \
+ # end
+
+mgmtdheaderdir = $(pkgincludedir)/mgmtd
+mgmtdheader_HEADERS = \
+ mgmtd/mgmt_defines.h \
+ # end
+
+noinst_HEADERS += \
+ mgmtd/mgmt.h \
+ mgmtd/mgmt_be_server.h \
+ mgmtd/mgmt_be_adapter.h \
+ mgmtd/mgmt_ds.h \
+ mgmtd/mgmt_fe_server.h \
+ mgmtd/mgmt_fe_adapter.h \
+ mgmtd/mgmt_history.h \
+ mgmtd/mgmt_memory.h \
+ mgmtd/mgmt_txn.h \
+ # end
+
+sbin_PROGRAMS += mgmtd/mgmtd
+
+mgmtd_mgmtd_SOURCES = \
+ mgmtd/mgmt_main.c \
+ # end
+mgmtd_mgmtd_CFLAGS = $(AM_CFLAGS) -I ./
+mgmtd_mgmtd_LDADD = mgmtd/libmgmtd.a lib/libfrr.la $(LIBCAP) $(LIBM) $(LIBYANG_LIBS) $(UST_LIBS)
+mgmtd_mgmtd_LDADD += mgmtd/libmgmt_be_nb.la
+
+if STATICD
+$(mgmtd_mgmtd_OBJECTS): yang/frr-staticd.yang.c
+CLEANFILES += yang/frr-staticd.yang.c
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES += staticd/static_vty.c
+endif
#include <linux/neighbour.h>
#include <linux/netfilter/nfnetlink_log.h>
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "prefix.h"
#include "nhrpd.h"
int netlink_nflog_group;
static int netlink_log_fd = -1;
-static struct thread *netlink_log_thread;
+static struct event *netlink_log_thread;
void netlink_update_binding(struct interface *ifp, union sockunion *proto,
union sockunion *nbma)
nhrp_peer_send_indication(ifp, htons(pkthdr->hw_protocol), &pktpl);
}
-static void netlink_log_recv(struct thread *t)
+static void netlink_log_recv(struct event *t)
{
uint8_t buf[ZNL_BUFFER_SIZE];
- int fd = THREAD_FD(t);
+ int fd = EVENT_FD(t);
struct zbuf payload, zb;
struct nlmsghdr *n;
}
}
- thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
- &netlink_log_thread);
+ event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
+ &netlink_log_thread);
}
void netlink_set_nflog_group(int nlgroup)
{
if (netlink_log_fd >= 0) {
- thread_cancel(&netlink_log_thread);
+ event_cancel(&netlink_log_thread);
close(netlink_log_fd);
netlink_log_fd = -1;
}
return;
netlink_log_register(netlink_log_fd, nlgroup);
- thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
- &netlink_log_thread);
+ event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
+ &netlink_log_thread);
}
}
#include "zebra.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "nhrpd.h"
hash_release(nifp->cache_hash, c);
nhrp_peer_unref(c->cur.peer);
nhrp_peer_unref(c->new.peer);
- THREAD_OFF(c->t_timeout);
- THREAD_OFF(c->t_auth);
+ EVENT_OFF(c->t_timeout);
+ EVENT_OFF(c->t_auth);
XFREE(MTYPE_NHRP_CACHE, c);
}
create ? nhrp_cache_alloc : NULL);
}
-static void nhrp_cache_do_free(struct thread *t)
+static void nhrp_cache_do_free(struct event *t)
{
- struct nhrp_cache *c = THREAD_ARG(t);
+ struct nhrp_cache *c = EVENT_ARG(t);
c->t_timeout = NULL;
nhrp_cache_free(c);
}
-static void nhrp_cache_do_timeout(struct thread *t)
+static void nhrp_cache_do_timeout(struct event *t)
{
- struct nhrp_cache *c = THREAD_ARG(t);
+ struct nhrp_cache *c = EVENT_ARG(t);
c->t_timeout = NULL;
if (c->cur.type != NHRP_CACHE_INVALID)
static void nhrp_cache_reset_new(struct nhrp_cache *c)
{
- THREAD_OFF(c->t_auth);
+ EVENT_OFF(c->t_auth);
if (notifier_list_anywhere(&c->newpeer_notifier))
nhrp_peer_notify_del(c->new.peer, &c->newpeer_notifier);
nhrp_peer_unref(c->new.peer);
static void nhrp_cache_update_timers(struct nhrp_cache *c)
{
- THREAD_OFF(c->t_timeout);
+ EVENT_OFF(c->t_timeout);
switch (c->cur.type) {
case NHRP_CACHE_INVALID:
if (!c->t_auth)
- thread_add_timer_msec(master, nhrp_cache_do_free, c, 10,
- &c->t_timeout);
+ event_add_timer_msec(master, nhrp_cache_do_free, c, 10,
+ &c->t_timeout);
break;
case NHRP_CACHE_INCOMPLETE:
case NHRP_CACHE_NEGATIVE:
case NHRP_CACHE_LOCAL:
case NHRP_CACHE_NUM_TYPES:
if (c->cur.expires)
- thread_add_timer(master, nhrp_cache_do_timeout, c,
- c->cur.expires - monotime(NULL),
- &c->t_timeout);
+ event_add_timer(master, nhrp_cache_do_timeout, c,
+ c->cur.expires - monotime(NULL),
+ &c->t_timeout);
break;
}
}
nhrp_cache_update_timers(c);
}
-static void nhrp_cache_do_auth_timeout(struct thread *t)
+static void nhrp_cache_do_auth_timeout(struct event *t)
{
- struct nhrp_cache *c = THREAD_ARG(t);
+ struct nhrp_cache *c = EVENT_ARG(t);
c->t_auth = NULL;
nhrp_cache_authorize_binding(&c->eventid, (void *)"timeout");
}
if (nhrp_peer_check(c->new.peer, 1)) {
evmgr_notify("authorize-binding", c,
nhrp_cache_authorize_binding);
- thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
- 10, &c->t_auth);
+ event_add_timer(master, nhrp_cache_do_auth_timeout, c,
+ 10, &c->t_auth);
}
break;
case NOTIFY_PEER_DOWN:
nhrp_cache_newpeer_notifier);
nhrp_cache_newpeer_notifier(&c->newpeer_notifier,
NOTIFY_PEER_UP);
- thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
- 60, &c->t_auth);
+ event_add_timer(master, nhrp_cache_do_auth_timeout, c,
+ 60, &c->t_auth);
}
}
nhrp_cache_update_timers(c);
#include <sys/socket.h>
#include <sys/un.h>
-#include "thread.h"
+#include "frrevent.h"
#include "zbuf.h"
#include "log.h"
#include "nhrpd.h"
struct nhrp_reqid_pool nhrp_event_reqid;
struct event_manager {
- struct thread *t_reconnect, *t_read, *t_write;
+ struct event *t_reconnect, *t_read, *t_write;
struct zbuf ibuf;
struct zbuf_queue obuf;
int fd;
uint8_t ibuf_data[4 * 1024];
};
-static void evmgr_reconnect(struct thread *t);
+static void evmgr_reconnect(struct event *t);
static void evmgr_connection_error(struct event_manager *evmgr)
{
- THREAD_OFF(evmgr->t_read);
- THREAD_OFF(evmgr->t_write);
+ EVENT_OFF(evmgr->t_read);
+ EVENT_OFF(evmgr->t_write);
zbuf_reset(&evmgr->ibuf);
zbufq_reset(&evmgr->obuf);
close(evmgr->fd);
evmgr->fd = -1;
if (nhrp_event_socket_path)
- thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
}
static void evmgr_recv_message(struct event_manager *evmgr, struct zbuf *zb)
}
}
-static void evmgr_read(struct thread *t)
+static void evmgr_read(struct event *t)
{
- struct event_manager *evmgr = THREAD_ARG(t);
+ struct event_manager *evmgr = EVENT_ARG(t);
struct zbuf *ibuf = &evmgr->ibuf;
struct zbuf msg;
while (zbuf_may_pull_until(ibuf, "\n\n", &msg))
evmgr_recv_message(evmgr, &msg);
- thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
+ event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}
-static void evmgr_write(struct thread *t)
+static void evmgr_write(struct event *t)
{
- struct event_manager *evmgr = THREAD_ARG(t);
+ struct event_manager *evmgr = EVENT_ARG(t);
int r;
r = zbufq_write(&evmgr->obuf, evmgr->fd);
if (r > 0) {
- thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
- &evmgr->t_write);
+ event_add_write(master, evmgr_write, evmgr, evmgr->fd,
+ &evmgr->t_write);
} else if (r < 0) {
evmgr_connection_error(evmgr);
}
zbuf_put(obuf, "\n", 1);
zbufq_queue(&evmgr->obuf, obuf);
if (evmgr->fd >= 0)
- thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
- &evmgr->t_write);
+ event_add_write(master, evmgr_write, evmgr, evmgr->fd,
+ &evmgr->t_write);
}
-static void evmgr_reconnect(struct thread *t)
+static void evmgr_reconnect(struct event *t)
{
- struct event_manager *evmgr = THREAD_ARG(t);
+ struct event_manager *evmgr = EVENT_ARG(t);
int fd;
if (evmgr->fd >= 0 || !nhrp_event_socket_path)
zlog_warn("%s: failure connecting nhrp-event socket: %s",
__func__, strerror(errno));
zbufq_reset(&evmgr->obuf);
- thread_add_timer(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
return;
}
zlog_info("Connected to Event Manager");
evmgr->fd = fd;
- thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
+ event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}
static struct event_manager evmgr_connection;
evmgr->fd = -1;
zbuf_init(&evmgr->ibuf, evmgr->ibuf_data, sizeof(evmgr->ibuf_data), 0);
zbufq_init(&evmgr->obuf);
- thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
}
void evmgr_set_socket(const char *socket)
#include "zebra.h"
#include "linklist.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "nhrpd.h"
#include "os.h"
#include "zebra.h"
#include "privs.h"
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "sigevent.h"
#include "lib/version.h"
#include "log.h"
unsigned int debug_flags = 0;
-struct thread_master *master;
+struct event_loop *master;
struct timeval current_time;
/* nhrpd options. */
#include <sys/types.h>
#include <sys/socket.h>
-#include "thread.h"
+#include "frrevent.h"
#include "nhrpd.h"
#include "netlink.h"
#include "znl.h"
int netlink_mcast_nflog_group;
static int netlink_mcast_log_fd = -1;
-static struct thread *netlink_mcast_log_thread;
+static struct event *netlink_mcast_log_thread;
struct mcast_ctx {
struct interface *ifp;
}
}
-static void netlink_mcast_log_recv(struct thread *t)
+static void netlink_mcast_log_recv(struct event *t)
{
uint8_t buf[65535]; /* Max OSPF Packet size */
- int fd = THREAD_FD(t);
+ int fd = EVENT_FD(t);
struct zbuf payload, zb;
struct nlmsghdr *n;
}
}
- thread_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
- &netlink_mcast_log_thread);
+ event_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
+ &netlink_mcast_log_thread);
}
static void netlink_mcast_log_register(int fd, int group)
void netlink_mcast_set_nflog_group(int nlgroup)
{
if (netlink_mcast_log_fd >= 0) {
- THREAD_OFF(netlink_mcast_log_thread);
+ EVENT_OFF(netlink_mcast_log_thread);
close(netlink_mcast_log_fd);
netlink_mcast_log_fd = -1;
debugf(NHRP_DEBUG_COMMON, "De-register nflog group");
return;
netlink_mcast_log_register(netlink_mcast_log_fd, nlgroup);
- thread_add_read(master, netlink_mcast_log_recv, 0,
- netlink_mcast_log_fd,
- &netlink_mcast_log_thread);
+ event_add_read(master, netlink_mcast_log_recv, 0,
+ netlink_mcast_log_fd, &netlink_mcast_log_thread);
debugf(NHRP_DEBUG_COMMON, "Register nflog group: %d",
netlink_mcast_nflog_group);
}
#include "zebra.h"
#include "zbuf.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "nhrpd.h"
#include "nhrp_protocol.h"
DEFINE_MTYPE_STATIC(NHRPD, NHRP_NHS, "NHRP next hop server");
DEFINE_MTYPE_STATIC(NHRPD, NHRP_REGISTRATION, "NHRP registration entries");
-static void nhrp_nhs_resolve(struct thread *t);
-static void nhrp_reg_send_req(struct thread *t);
+static void nhrp_nhs_resolve(struct event *t);
+static void nhrp_reg_send_req(struct event *t);
static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg)
{
/* Success - schedule next registration, and route NHS */
r->timeout = 2;
holdtime = nifp->afi[nhs->afi].holdtime;
- THREAD_OFF(r->t_register);
+ EVENT_OFF(r->t_register);
/* RFC 2332 5.2.3 - Registration is recommend to be renewed
* every one third of holdtime */
- thread_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
+ &r->t_register);
r->proto_addr = p->dst_proto;
c = nhrp_cache_get(ifp, &p->dst_proto, 1);
&cie_nbma_nhs);
}
-static void nhrp_reg_timeout(struct thread *t)
+static void nhrp_reg_timeout(struct event *t)
{
- struct nhrp_registration *r = THREAD_ARG(t);
+ struct nhrp_registration *r = EVENT_ARG(t);
struct nhrp_cache *c;
}
r->timeout = 2;
}
- thread_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
}
static void nhrp_reg_peer_notify(struct notifier_block *n, unsigned long cmd)
case NOTIFY_PEER_MTU_CHANGED:
debugf(NHRP_DEBUG_COMMON, "NHS: Flush timer for %pSU",
&r->peer->vc->remote.nbma);
- THREAD_OFF(r->t_register);
- thread_add_timer_msec(master, nhrp_reg_send_req, r, 10,
- &r->t_register);
+ EVENT_OFF(r->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, r, 10,
+ &r->t_register);
break;
}
}
-static void nhrp_reg_send_req(struct thread *t)
+static void nhrp_reg_send_req(struct event *t)
{
- struct nhrp_registration *r = THREAD_ARG(t);
+ struct nhrp_registration *r = EVENT_ARG(t);
struct nhrp_nhs *nhs = r->nhs;
struct interface *ifp = nhs->ifp;
struct nhrp_interface *nifp = ifp->info;
if (!nhrp_peer_check(r->peer, 2)) {
debugf(NHRP_DEBUG_COMMON, "NHS: Waiting link for %pSU",
&r->peer->vc->remote.nbma);
- thread_add_timer(master, nhrp_reg_send_req, r, 120,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_send_req, r, 120,
+ &r->t_register);
return;
}
- thread_add_timer(master, nhrp_reg_timeout, r, r->timeout,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_timeout, r, r->timeout,
+ &r->t_register);
/* RFC2332 5.2.3 NHC uses it's own address as dst if NHS is unknown */
dst_proto = &nhs->proto_addr;
nhrp_peer_notify_del(r->peer, &r->peer_notifier);
nhrp_peer_unref(r->peer);
nhrp_reglist_del(&r->nhs->reglist_head, r);
- THREAD_OFF(r->t_register);
+ EVENT_OFF(r->t_register);
XFREE(MTYPE_NHRP_REGISTRATION, r);
}
if (n < 0) {
/* Failed, retry in a moment */
- thread_add_timer(master, nhrp_nhs_resolve, nhs, 5,
- &nhs->t_resolve);
+ event_add_timer(master, nhrp_nhs_resolve, nhs, 5,
+ &nhs->t_resolve);
return;
}
- thread_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
- &nhs->t_resolve);
+ event_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
+ &nhs->t_resolve);
frr_each (nhrp_reglist, &nhs->reglist_head, reg)
reg->mark = 1;
nhrp_reglist_add_tail(&nhs->reglist_head, reg);
 nhrp_peer_notify_add(reg->peer, &reg->peer_notifier,
nhrp_reg_peer_notify);
- thread_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
- &reg->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
+ &reg->t_register);
}
frr_each_safe (nhrp_reglist, &nhs->reglist_head, reg)
nhrp_reg_delete(reg);
}
-static void nhrp_nhs_resolve(struct thread *t)
+static void nhrp_nhs_resolve(struct event *t)
{
- struct nhrp_nhs *nhs = THREAD_ARG(t);
+ struct nhrp_nhs *nhs = EVENT_ARG(t);
resolver_resolve(&nhs->dns_resolve, AF_INET, VRF_DEFAULT,
nhs->nbma_fqdn, nhrp_nhs_resolve_cb);
.reglist_head = INIT_DLIST(nhs->reglist_head),
};
nhrp_nhslist_add_tail(&nifp->afi[afi].nhslist_head, nhs);
- thread_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
- &nhs->t_resolve);
+ event_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
+ &nhs->t_resolve);
return NHRP_OK;
}
frr_each_safe (nhrp_reglist, &nhs->reglist_head, r)
nhrp_reg_delete(r);
- THREAD_OFF(nhs->t_resolve);
+ EVENT_OFF(nhs->t_resolve);
nhrp_nhslist_del(&nifp->afi[afi].nhslist_head, nhs);
free((void *)nhs->nbma_fqdn);
XFREE(MTYPE_NHRP_NHS, nhs);
#include <netinet/if_ether.h>
#include "nhrpd.h"
#include "zbuf.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "nhrp_protocol.h"
return -1;
}
-static void nhrp_packet_recvraw(struct thread *t)
+static void nhrp_packet_recvraw(struct event *t)
{
- int fd = THREAD_FD(t), ifindex;
+ int fd = EVENT_FD(t), ifindex;
struct zbuf *zb;
struct interface *ifp;
struct nhrp_peer *p;
uint8_t addr[64];
size_t len, addrlen;
- thread_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);
+ event_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);
zb = zbuf_alloc(1500);
if (!zb)
int nhrp_packet_init(void)
{
- thread_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
+ event_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
return 0;
}
#include "zebra.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "network.h"
debugf(NHRP_DEBUG_COMMON, "Deleting peer ref:%d remote:%pSU local:%pSU",
p->ref, &p->vc->remote.nbma, &p->vc->local.nbma);
- THREAD_OFF(p->t_fallback);
- THREAD_OFF(p->t_timer);
+ EVENT_OFF(p->t_fallback);
+ EVENT_OFF(p->t_timer);
hash_release(nifp->peer_hash, p);
nhrp_interface_notify_del(p->ifp, &p->ifp_notifier);
nhrp_vc_notify_del(p->vc, &p->vc_notifier);
XFREE(MTYPE_NHRP_PEER, p);
}
-static void nhrp_peer_notify_up(struct thread *t)
+static void nhrp_peer_notify_up(struct event *t)
{
- struct nhrp_peer *p = THREAD_ARG(t);
+ struct nhrp_peer *p = EVENT_ARG(t);
struct nhrp_vc *vc = p->vc;
struct interface *ifp = p->ifp;
struct nhrp_interface *nifp = ifp->info;
online = nifp->enabled && (!nifp->ipsec_profile || vc->ipsec);
if (p->online != online) {
- THREAD_OFF(p->t_fallback);
+ EVENT_OFF(p->t_fallback);
if (online && notifier_active(&p->notifier_list)) {
/* If we requested the IPsec connection, delay
* the up notification a bit to allow things
* settle down. This allows IKE to install
* SPDs and SAs. */
- thread_add_timer_msec(master, nhrp_peer_notify_up, p,
- 50, &p->t_fallback);
+ event_add_timer_msec(master, nhrp_peer_notify_up, p, 50,
+ &p->t_fallback);
} else {
nhrp_peer_ref(p);
p->online = online;
debugf(NHRP_DEBUG_COMMON, "Cleaning up undeleted peer entries (%lu)",
nifp->peer_hash ? nifp->peer_hash->count : 0);
- if (nifp->peer_hash) {
- hash_clean(nifp->peer_hash, do_peer_hash_free);
- assert(nifp->peer_hash->count == 0);
- hash_free(nifp->peer_hash);
- nifp->peer_hash = NULL;
- }
+ hash_clean_and_free(&nifp->peer_hash, do_peer_hash_free);
}
struct nhrp_peer *nhrp_peer_get(struct interface *ifp,
}
}
-static void nhrp_peer_request_timeout(struct thread *t)
+static void nhrp_peer_request_timeout(struct event *t)
{
- struct nhrp_peer *p = THREAD_ARG(t);
+ struct nhrp_peer *p = EVENT_ARG(t);
struct nhrp_vc *vc = p->vc;
struct interface *ifp = p->ifp;
struct nhrp_interface *nifp = ifp->info;
p->fallback_requested = 1;
vici_request_vc(nifp->ipsec_fallback_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(master, nhrp_peer_request_timeout, p, 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p, 30,
+ &p->t_fallback);
} else {
p->requested = p->fallback_requested = 0;
}
}
-static void nhrp_peer_defer_vici_request(struct thread *t)
+static void nhrp_peer_defer_vici_request(struct event *t)
{
- struct nhrp_peer *p = THREAD_ARG(t);
+ struct nhrp_peer *p = EVENT_ARG(t);
struct nhrp_vc *vc = p->vc;
struct interface *ifp = p->ifp;
struct nhrp_interface *nifp = ifp->info;
- THREAD_OFF(p->t_timer);
+ EVENT_OFF(p->t_timer);
if (p->online) {
debugf(NHRP_DEBUG_COMMON,
} else {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(
- master, nhrp_peer_request_timeout, p,
- (nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p,
+ (nifp->ipsec_fallback_profile && !p->prio) ? 15
+ : 30,
+ &p->t_fallback);
}
}
if (p->prio) {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(
- master, nhrp_peer_request_timeout, p,
- (nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p,
+ (nifp->ipsec_fallback_profile && !p->prio) ? 15
+ : 30,
+ &p->t_fallback);
} else {
/* Maximum timeout is 1 second */
int r_time_ms = frr_weak_random() % 1000;
debugf(NHRP_DEBUG_COMMON,
"Initiating IPsec connection request to %pSU after %d ms:",
&vc->remote.nbma, r_time_ms);
- thread_add_timer_msec(master, nhrp_peer_defer_vici_request,
- p, r_time_ms, &p->t_timer);
+ event_add_timer_msec(master, nhrp_peer_defer_vici_request, p,
+ r_time_ms, &p->t_timer);
}
return 0;
#include "nhrpd.h"
#include "table.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "nhrp_protocol.h"
static struct route_table *shortcut_rib[AFI_MAX];
-static void nhrp_shortcut_do_purge(struct thread *t);
+static void nhrp_shortcut_do_purge(struct event *t);
static void nhrp_shortcut_delete(struct nhrp_shortcut *s);
static void nhrp_shortcut_send_resolution_req(struct nhrp_shortcut *s);
}
}
-static void nhrp_shortcut_do_expire(struct thread *t)
+static void nhrp_shortcut_do_expire(struct event *t)
{
- struct nhrp_shortcut *s = THREAD_ARG(t);
+ struct nhrp_shortcut *s = EVENT_ARG(t);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, s->holding_time / 3,
- &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, s->holding_time / 3,
+ &s->t_timer);
s->expiring = 1;
nhrp_shortcut_check_use(s);
}
s->route_installed = 0;
}
- THREAD_OFF(s->t_timer);
+ EVENT_OFF(s->t_timer);
if (holding_time) {
s->expiring = 0;
s->holding_time = holding_time;
- thread_add_timer(master, nhrp_shortcut_do_expire, s,
- 2 * holding_time / 3, &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_expire, s,
+ 2 * holding_time / 3, &s->t_timer);
}
}
struct route_node *rn;
afi_t afi = family2afi(PREFIX_FAMILY(s->p));
- THREAD_OFF(s->t_timer);
+ EVENT_OFF(s->t_timer);
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
debugf(NHRP_DEBUG_ROUTE, "Shortcut %pFX purged", s->p);
}
}
-static void nhrp_shortcut_do_purge(struct thread *t)
+static void nhrp_shortcut_do_purge(struct event *t)
{
- struct nhrp_shortcut *s = THREAD_ARG(t);
+ struct nhrp_shortcut *s = EVENT_ARG(t);
s->t_timer = NULL;
nhrp_shortcut_delete(s);
}
int holding_time = pp->if_ad->holdtime;
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
- THREAD_OFF(s->t_timer);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, 1, &s->t_timer);
+ EVENT_OFF(s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, 1, &s->t_timer);
if (pp->hdr->type != NHRP_PACKET_RESOLUTION_REPLY) {
if (pp->hdr->type == NHRP_PACKET_ERROR_INDICATION
s = nhrp_shortcut_get(&p);
if (s && s->type != NHRP_CACHE_INCOMPLETE) {
s->addr = *addr;
- THREAD_OFF(s->t_timer);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, 30,
- &s->t_timer);
+ EVENT_OFF(s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, 30,
+ &s->t_timer);
nhrp_shortcut_send_resolution_req(s);
}
}
void nhrp_shortcut_purge(struct nhrp_shortcut *s, int force)
{
- THREAD_OFF(s->t_timer);
+ EVENT_OFF(s->t_timer);
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
if (force) {
/* Immediate purge on route with draw or pending shortcut */
- thread_add_timer_msec(master, nhrp_shortcut_do_purge, s, 5,
- &s->t_timer);
+ event_add_timer_msec(master, nhrp_shortcut_do_purge, s, 5,
+ &s->t_timer);
} else {
/* Soft expire - force immediate renewal, but purge
* in few seconds to make sure stale route is not
* This allows to keep nhrp route up, and to not
* cause temporary rerouting via hubs causing latency
* jitter. */
- thread_add_timer_msec(master, nhrp_shortcut_do_purge, s, 3000,
- &s->t_timer);
+ event_add_timer_msec(master, nhrp_shortcut_do_purge, s, 3000,
+ &s->t_timer);
s->expiring = 1;
nhrp_shortcut_check_use(s);
}
#include "memory.h"
#include "stream.h"
#include "hash.h"
-#include "thread.h"
+#include "frrevent.h"
#include "jhash.h"
#include "nhrpd.h"
#define NHRP_VTY_PORT 2610
#define NHRP_DEFAULT_CONFIG "nhrpd.conf"
-extern struct thread_master *master;
+extern struct event_loop *master;
enum { NHRP_OK = 0,
NHRP_ERR_FAIL,
struct notifier_list notifier_list;
struct interface *ifp;
struct nhrp_vc *vc;
- struct thread *t_fallback;
+ struct event *t_fallback;
struct notifier_block vc_notifier, ifp_notifier;
- struct thread *t_timer;
+ struct event *t_timer;
};
struct nhrp_packet_parser {
struct notifier_block newpeer_notifier;
struct notifier_list notifier_list;
struct nhrp_reqid eventid;
- struct thread *t_timeout;
- struct thread *t_auth;
+ struct event *t_timeout;
+ struct event *t_auth;
struct {
enum nhrp_cache_type type;
union sockunion addr;
struct nhrp_reqid reqid;
- struct thread *t_timer;
+ struct event *t_timer;
enum nhrp_cache_type type;
unsigned int holding_time;
union sockunion proto_addr;
const char *nbma_fqdn; /* IP-address or FQDN */
- struct thread *t_resolve;
+ struct event *t_resolve;
struct resolver_query dns_resolve;
struct nhrp_reglist_head reglist_head;
};
struct nhrp_registration {
struct nhrp_reglist_item reglist_entry;
- struct thread *t_register;
+ struct event *t_register;
struct nhrp_nhs *nhs;
struct nhrp_reqid reqid;
unsigned int timeout;
#include <sys/socket.h>
#include <sys/un.h>
-#include "thread.h"
+#include "frrevent.h"
#include "zbuf.h"
#include "log.h"
#include "lib_errors.h"
}
struct vici_conn {
- struct thread *t_reconnect, *t_read, *t_write;
+ struct event *t_reconnect, *t_read, *t_write;
struct zbuf ibuf;
struct zbuf_queue obuf;
int fd;
int nsections;
};
-static void vici_reconnect(struct thread *t);
+static void vici_reconnect(struct event *t);
static void vici_submit_request(struct vici_conn *vici, const char *name, ...);
static void vici_zbuf_puts(struct zbuf *obuf, const char *str)
{
nhrp_vc_reset();
- THREAD_OFF(vici->t_read);
- THREAD_OFF(vici->t_write);
+ EVENT_OFF(vici->t_read);
+ EVENT_OFF(vici->t_write);
zbuf_reset(&vici->ibuf);
zbufq_reset(&vici->obuf);
close(vici->fd);
vici->fd = -1;
- thread_add_timer(master, vici_reconnect, vici, 2, &vici->t_reconnect);
+ event_add_timer(master, vici_reconnect, vici, 2, &vici->t_reconnect);
}
static void vici_parse_message(struct vici_conn *vici, struct zbuf *msg,
}
}
-static void vici_read(struct thread *t)
+static void vici_read(struct event *t)
{
- struct vici_conn *vici = THREAD_ARG(t);
+ struct vici_conn *vici = EVENT_ARG(t);
struct zbuf *ibuf = &vici->ibuf;
struct zbuf pktbuf;
vici_recv_message(vici, &pktbuf);
} while (1);
- thread_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
+ event_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
}
-static void vici_write(struct thread *t)
+static void vici_write(struct event *t)
{
- struct vici_conn *vici = THREAD_ARG(t);
+ struct vici_conn *vici = EVENT_ARG(t);
int r;
r = zbufq_write(&vici->obuf, vici->fd);
if (r > 0) {
- thread_add_write(master, vici_write, vici, vici->fd,
- &vici->t_write);
+ event_add_write(master, vici_write, vici, vici->fd,
+ &vici->t_write);
} else if (r < 0) {
vici_connection_error(vici);
}
}
zbufq_queue(&vici->obuf, obuf);
- thread_add_write(master, vici_write, vici, vici->fd, &vici->t_write);
+ event_add_write(master, vici_write, vici, vici->fd, &vici->t_write);
}
static void vici_submit_request(struct vici_conn *vici, const char *name, ...)
return buff;
}
-static void vici_reconnect(struct thread *t)
+static void vici_reconnect(struct event *t)
{
- struct vici_conn *vici = THREAD_ARG(t);
+ struct vici_conn *vici = EVENT_ARG(t);
int fd;
char *file_path;
debugf(NHRP_DEBUG_VICI,
"%s: failure connecting VICI socket: %s", __func__,
strerror(errno));
- thread_add_timer(master, vici_reconnect, vici, 2,
- &vici->t_reconnect);
+ event_add_timer(master, vici_reconnect, vici, 2,
+ &vici->t_reconnect);
return;
}
debugf(NHRP_DEBUG_COMMON, "VICI: Connected");
vici->fd = fd;
- thread_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
+ event_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
/* Send event subscribtions */
// vici_register_event(vici, "child-updown");
vici->fd = -1;
zbuf_init(&vici->ibuf, vici->ibuf_data, sizeof(vici->ibuf_data), 0);
zbufq_init(&vici->obuf);
- thread_add_timer_msec(master, vici_reconnect, vici, 10,
- &vici->t_reconnect);
+ event_add_timer_msec(master, vici_reconnect, vici, 10,
+ &vici->t_reconnect);
}
void vici_terminate(void)
#include "vty.h"
#include "linklist.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "plist.h"
#include "filter.h"
#include "log.h"
#include "memory.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "if.h"
ospf6_spf_table_finish(oa->spf_table);
ospf6_route_remove_all(oa->route_table);
- THREAD_OFF(oa->thread_router_lsa);
- THREAD_OFF(oa->thread_intra_prefix_lsa);
+ EVENT_OFF(oa->thread_router_lsa);
+ EVENT_OFF(oa->thread_intra_prefix_lsa);
}
uint32_t spf_calculation; /* SPF calculation count */
- struct thread *thread_router_lsa;
- struct thread *thread_intra_prefix_lsa;
+ struct event *thread_router_lsa;
+ struct event *thread_intra_prefix_lsa;
uint32_t router_lsa_size_limit;
/* Area announce list */
#include "routemap.h"
#include "table.h"
#include "plist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "lib/northbound_cli.h"
return lsa;
}
-void ospf6_orig_as_external_lsa(struct thread *thread)
+void ospf6_orig_as_external_lsa(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6_lsa *lsa;
uint32_t type, adv_router;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
if (oi->state == OSPF6_INTERFACE_DOWN)
return;
ROUTEMAP(red) = NULL;
}
-static void ospf6_asbr_routemap_update_timer(struct thread *thread)
+static void ospf6_asbr_routemap_update_timer(struct event *thread)
{
- struct ospf6 *ospf6 = THREAD_ARG(thread);
+ struct ospf6 *ospf6 = EVENT_ARG(thread);
struct ospf6_redist *red;
int type;
{
SET_FLAG(red->flag, OSPF6_IS_RMAP_CHANGED);
- if (thread_is_scheduled(ospf6->t_distribute_update))
+ if (event_is_scheduled(ospf6->t_distribute_update))
return;
if (IS_OSPF6_DEBUG_ASBR)
zlog_debug("%s: trigger redistribute reset thread", __func__);
- thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, ospf6,
- OSPF_MIN_LS_INTERVAL,
- &ospf6->t_distribute_update);
+ event_add_timer_msec(master, ospf6_asbr_routemap_update_timer, ospf6,
+ OSPF_MIN_LS_INTERVAL, &ospf6->t_distribute_update);
}
void ospf6_asbr_routemap_update(const char *mapname)
if (IS_OSPF6_DEBUG_AGGR)
zlog_debug("%s: LSA found, refresh it",
__func__);
- THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
- &lsa->refresh);
+ EVENT_OFF(lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
return;
}
}
aggr->action = OSPF6_ROUTE_AGGR_NONE;
ospf6_asbr_summary_config_delete(ospf6, rn);
- if (OSPF6_EXTERNAL_RT_COUNT(aggr))
- hash_clean(aggr->match_extnl_hash,
- ospf6_aggr_handle_external_info);
+ hash_clean_and_free(&aggr->match_extnl_hash,
+ ospf6_aggr_handle_external_info);
- hash_free(aggr->match_extnl_hash);
XFREE(MTYPE_OSPF6_EXTERNAL_RT_AGGR, aggr);
} else if (aggr->action == OSPF6_ROUTE_AGGR_MODIFY) {
void ospf6_external_aggregator_free(struct ospf6_external_aggr_rt *aggr)
{
- if (OSPF6_EXTERNAL_RT_COUNT(aggr))
- hash_clean(aggr->match_extnl_hash,
- ospf6_aggr_unlink_external_info);
+ hash_clean_and_free(&aggr->match_extnl_hash,
+ ospf6_aggr_unlink_external_info);
if (IS_OSPF6_DEBUG_AGGR)
zlog_debug("%s: Release the aggregator Address(%pFX)",
__func__,
&aggr->p);
- hash_free(aggr->match_extnl_hash);
- aggr->match_extnl_hash = NULL;
-
XFREE(MTYPE_OSPF6_EXTERNAL_RT_AGGR, aggr);
}
lsa = ospf6_find_external_lsa(ospf6, &rt->prefix);
if (lsa) {
- THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
- &lsa->refresh);
+ EVENT_OFF(lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
} else {
if (IS_OSPF6_DEBUG_AGGR)
zlog_debug("%s: Originate external route(%pFX)",
}
}
-static void ospf6_asbr_summary_process(struct thread *thread)
+static void ospf6_asbr_summary_process(struct event *thread)
{
- struct ospf6 *ospf6 = THREAD_ARG(thread);
+ struct ospf6 *ospf6 = EVENT_ARG(thread);
int operation = 0;
operation = ospf6->aggr_action;
{
aggr->action = operation;
- if (thread_is_scheduled(ospf6->t_external_aggr)) {
+ if (event_is_scheduled(ospf6->t_external_aggr)) {
if (ospf6->aggr_action == OSPF6_ROUTE_AGGR_ADD) {
if (IS_OSPF6_DEBUG_AGGR)
if (IS_OSPF6_DEBUG_AGGR)
zlog_debug("%s, Restarting Aggregator delay timer.",
__func__);
- THREAD_OFF(ospf6->t_external_aggr);
+ EVENT_OFF(ospf6->t_external_aggr);
}
}
__func__, ospf6->aggr_delay_interval);
ospf6->aggr_action = operation;
- thread_add_timer(master,
- ospf6_asbr_summary_process,
- ospf6, ospf6->aggr_delay_interval,
- &ospf6->t_external_aggr);
+ event_add_timer(master, ospf6_asbr_summary_process, ospf6,
+ ospf6->aggr_delay_interval, &ospf6->t_external_aggr);
}
int ospf6_asbr_external_rt_advertise(struct ospf6 *ospf6,
#include "linklist.h"
#include "memory.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "zclient.h"
if (bss->state == BFD_STATUS_DOWN
&& bss->previous_state == BFD_STATUS_UP) {
- THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ EVENT_OFF(on->inactivity_timer);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
}
#include <zebra.h>
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "command.h"
lsdb_self = ospf6_get_scoped_lsdb_self(lsa);
ospf6_lsdb_add(ospf6_lsa_copy(lsa), lsdb_self);
- THREAD_OFF(lsa->refresh);
- thread_add_timer(master, ospf6_lsa_refresh, lsa, OSPF_LS_REFRESH_TIME,
- &lsa->refresh);
+ EVENT_OFF(lsa->refresh);
+ event_add_timer(master, ospf6_lsa_refresh, lsa, OSPF_LS_REFRESH_TIME,
+ &lsa->refresh);
if (IS_OSPF6_DEBUG_LSA_TYPE(lsa->header->type)
|| IS_OSPF6_DEBUG_ORIGINATE_TYPE(lsa->header->type)) {
self = ospf6_lsdb_lookup(lsa->header->type, lsa->header->id,
lsa->header->adv_router, lsdb_self);
if (self) {
- THREAD_OFF(self->expire);
- THREAD_OFF(self->refresh);
+ EVENT_OFF(self->expire);
+ EVENT_OFF(self->refresh);
ospf6_lsdb_remove(self, lsdb_self);
}
lsa->name);
lsa->external_lsa_id = old->external_lsa_id;
}
- THREAD_OFF(old->expire);
- THREAD_OFF(old->refresh);
+ EVENT_OFF(old->expire);
+ EVENT_OFF(old->refresh);
ospf6_flood_clear(old);
}
monotime(&now);
if (!OSPF6_LSA_IS_MAXAGE(lsa)) {
- thread_add_timer(master, ospf6_lsa_expire, lsa,
- OSPF_LSA_MAXAGE + lsa->birth.tv_sec
- - now.tv_sec,
- &lsa->expire);
+ event_add_timer(master, ospf6_lsa_expire, lsa,
+ OSPF_LSA_MAXAGE + lsa->birth.tv_sec -
+ now.tv_sec,
+ &lsa->expire);
} else
lsa->expire = NULL;
ospf6_lsdb_add(ospf6_lsa_copy(lsa),
on->retrans_list);
- thread_add_timer(
- master, ospf6_lsupdate_send_neighbor,
- on, on->ospf6_if->rxmt_interval,
- &on->thread_send_lsupdate);
+ event_add_timer(master,
+ ospf6_lsupdate_send_neighbor,
+ on, on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsupdate);
retrans_added++;
}
}
if ((oi->type == OSPF_IFTYPE_BROADCAST)
|| (oi->type == OSPF_IFTYPE_POINTOPOINT)) {
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsupdate_list);
- thread_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
- &oi->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
+ &oi->thread_send_lsupdate);
} else {
/* reschedule retransmissions to all neighbors */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
- THREAD_OFF(on->thread_send_lsupdate);
- thread_add_event(master, ospf6_lsupdate_send_neighbor,
- on, 0, &on->thread_send_lsupdate);
+ EVENT_OFF(on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor,
+ on, 0, &on->thread_send_lsupdate);
}
}
}
"Delayed acknowledgement (BDR & MoreRecent & from DR)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi,
- 3, &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi,
+ 3, &oi->thread_send_lsack);
} else {
if (is_debug)
zlog_debug(
"Delayed acknowledgement (BDR & Duplicate & ImpliedAck & from DR)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi,
- 3, &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi,
+ 3, &oi->thread_send_lsack);
} else {
if (is_debug)
zlog_debug(
if (is_debug)
zlog_debug("Direct acknowledgement (BDR & Duplicate)");
ospf6_lsdb_add(ospf6_lsa_copy(lsa), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
return;
}
"Delayed acknowledgement (AllOther & MoreRecent)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi, 3,
- &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi, 3,
+ &oi->thread_send_lsack);
return;
}
zlog_debug(
"Direct acknowledgement (AllOther & Duplicate)");
ospf6_lsdb_add(ospf6_lsa_copy(lsa), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
return;
}
/* a) Acknowledge back to neighbor (Direct acknowledgement,
* 13.5) */
ospf6_lsdb_add(ospf6_lsa_copy(new), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
/* b) Discard */
ospf6_lsa_delete(new);
"Newer instance of the self-originated LSA");
zlog_debug("Schedule reorigination");
}
- thread_add_event(master, ospf6_lsa_refresh, new, 0,
- &new->refresh);
+ event_add_event(master, ospf6_lsa_refresh, new, 0,
+ &new->refresh);
}
/* GR: check for network topology change. */
new->name);
/* BadLSReq */
- thread_add_event(master, bad_lsreq, from, 0, NULL);
+ event_add_event(master, bad_lsreq, from, 0, NULL);
ospf6_lsa_delete(new);
return;
ospf6_lsdb_add(ospf6_lsa_copy(old),
from->lsupdate_list);
- thread_add_event(master, ospf6_lsupdate_send_neighbor,
- from, 0, &from->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor,
+ from, 0, &from->thread_send_lsupdate);
ospf6_lsa_delete(new);
return;
ospf6->gr_info.restart_in_progress = false;
ospf6->gr_info.finishing_restart = true;
- THREAD_OFF(ospf6->gr_info.t_grace_period);
+ EVENT_OFF(ospf6->gr_info.t_grace_period);
/* Record in non-volatile memory that the restart is complete. */
ospf6_gr_nvm_delete(ospf6);
}
/* Handling of grace period expiry. */
-static void ospf6_gr_grace_period_expired(struct thread *thread)
+static void ospf6_gr_grace_period_expired(struct event *thread)
{
- struct ospf6 *ospf6 = THREAD_ARG(thread);
+ struct ospf6 *ospf6 = EVENT_ARG(thread);
ospf6_gr_restart_exit(ospf6, "grace period has expired");
}
zlog_debug(
"GR: remaining time until grace period expires: %lu(s)",
remaining_time);
- thread_add_timer(master, ospf6_gr_grace_period_expired,
- ospf6, remaining_time,
- &ospf6->gr_info.t_grace_period);
+ event_add_timer(master, ospf6_gr_grace_period_expired,
+ ospf6, remaining_time,
+ &ospf6->gr_info.t_grace_period);
}
}
if (ospf6->ospf6_helper_cfg.enable_rtr_list == NULL)
return;
- hash_clean(ospf6->ospf6_helper_cfg.enable_rtr_list,
- ospf6_disable_rtr_hash_free);
- hash_free(ospf6->ospf6_helper_cfg.enable_rtr_list);
- ospf6->ospf6_helper_cfg.enable_rtr_list = NULL;
+ hash_clean_and_free(&ospf6->ospf6_helper_cfg.enable_rtr_list,
+ ospf6_disable_rtr_hash_free);
}
/*
* Returns:
* Nothing
*/
-static void ospf6_handle_grace_timer_expiry(struct thread *thread)
+static void ospf6_handle_grace_timer_expiry(struct event *thread)
{
- struct ospf6_neighbor *nbr = THREAD_ARG(thread);
+ struct ospf6_neighbor *nbr = EVENT_ARG(thread);
ospf6_gr_helper_exit(nbr, OSPF6_GR_HELPER_GRACE_TIMEOUT);
}
}
if (OSPF6_GR_IS_ACTIVE_HELPER(restarter)) {
- THREAD_OFF(restarter->gr_helper_info.t_grace_timer);
+ EVENT_OFF(restarter->gr_helper_info.t_grace_timer);
if (ospf6->ospf6_helper_cfg.active_restarter_cnt > 0)
ospf6->ospf6_helper_cfg.active_restarter_cnt--;
actual_grace_interval);
/* Start the grace timer */
- thread_add_timer(master, ospf6_handle_grace_timer_expiry, restarter,
- actual_grace_interval,
- &restarter->gr_helper_info.t_grace_timer);
+ event_add_timer(master, ospf6_handle_grace_timer_expiry, restarter,
+ actual_grace_interval,
+ &restarter->gr_helper_info.t_grace_timer);
return OSPF6_GR_ACTIVE_HELPER;
}
* expiry, stop the grace timer.
*/
if (reason != OSPF6_GR_HELPER_GRACE_TIMEOUT)
- THREAD_OFF(nbr->gr_helper_info.t_grace_timer);
+ EVENT_OFF(nbr->gr_helper_info.t_grace_timer);
if (ospf6->ospf6_helper_cfg.active_restarter_cnt <= 0) {
zlog_err(
vty_out(vty, " Actual Grace period : %d(in seconds)\n",
nbr->gr_helper_info.actual_grace_period);
vty_out(vty, " Remaining GraceTime:%ld(in seconds).\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
nbr->gr_helper_info.t_grace_timer));
vty_out(vty, " Graceful Restart reason: %s.\n\n",
ospf6_restart_reason_desc[nbr->gr_helper_info
json_object_int_add(json_neigh, "actualGraceInterval",
nbr->gr_helper_info.actual_grace_period);
json_object_int_add(json_neigh, "remainGracetime",
- thread_timer_remain_second(
- nbr->gr_helper_info.t_grace_timer));
+ event_timer_remain_second(
+ nbr->gr_helper_info.t_grace_timer));
json_object_string_add(json_neigh, "restartReason",
ospf6_restart_reason_desc[
nbr->gr_helper_info.gr_restart_reason]);
#include "if.h"
#include "log.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "plist.h"
#include "zclient.h"
list_delete(&oi->neighbor_list);
- THREAD_OFF(oi->thread_send_hello);
- THREAD_OFF(oi->thread_send_lsupdate);
- THREAD_OFF(oi->thread_send_lsack);
- THREAD_OFF(oi->thread_sso);
- THREAD_OFF(oi->thread_wait_timer);
+ EVENT_OFF(oi->thread_send_hello);
+ EVENT_OFF(oi->thread_send_lsupdate);
+ EVENT_OFF(oi->thread_send_lsack);
+ EVENT_OFF(oi->thread_sso);
+ EVENT_OFF(oi->thread_wait_timer);
ospf6_lsdb_remove_all(oi->lsdb);
ospf6_lsdb_remove_all(oi->lsupdate_list);
{
SET_FLAG(oi->flag, OSPF6_INTERFACE_DISABLE);
- thread_execute(master, interface_down, oi, 0);
+ event_execute(master, interface_down, oi, 0);
ospf6_lsdb_remove_all(oi->lsdb);
ospf6_lsdb_remove_all(oi->lsdb_self);
ospf6_lsdb_remove_all(oi->lsupdate_list);
ospf6_lsdb_remove_all(oi->lsack_list);
- THREAD_OFF(oi->thread_send_hello);
- THREAD_OFF(oi->thread_send_lsupdate);
- THREAD_OFF(oi->thread_send_lsack);
- THREAD_OFF(oi->thread_sso);
+ EVENT_OFF(oi->thread_send_hello);
+ EVENT_OFF(oi->thread_send_lsupdate);
+ EVENT_OFF(oi->thread_send_lsack);
+ EVENT_OFF(oi->thread_sso);
- THREAD_OFF(oi->thread_network_lsa);
- THREAD_OFF(oi->thread_link_lsa);
- THREAD_OFF(oi->thread_intra_prefix_lsa);
- THREAD_OFF(oi->thread_as_extern_lsa);
- THREAD_OFF(oi->thread_wait_timer);
+ EVENT_OFF(oi->thread_network_lsa);
+ EVENT_OFF(oi->thread_link_lsa);
+ EVENT_OFF(oi->thread_intra_prefix_lsa);
+ EVENT_OFF(oi->thread_as_extern_lsa);
+ EVENT_OFF(oi->thread_wait_timer);
}
static struct in6_addr *
if (if_is_operative(ifp)
&& (ospf6_interface_get_linklocal_address(oi->interface)
|| if_is_loopback(oi->interface)))
- thread_execute(master, interface_up, oi, 0);
+ event_execute(master, interface_up, oi, 0);
else
- thread_execute(master, interface_down, oi, 0);
+ event_execute(master, interface_down, oi, 0);
return;
}
if (on->state < OSPF6_NEIGHBOR_TWOWAY)
continue;
/* Schedule AdjOK. */
- thread_add_event(master, adj_ok, on, 0,
- &on->thread_adj_ok);
+ event_add_event(master, adj_ok, on, 0,
+ &on->thread_adj_ok);
}
}
#endif /* __FreeBSD__ */
/* Interface State Machine */
-void interface_up(struct thread *thread)
+void interface_up(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6 *ospf6;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi && oi->interface);
if (!oi->type_cfg)
oi->type = ospf6_default_iftype(oi->interface);
- thread_cancel(&oi->thread_sso);
+ event_cancel(&oi->thread_sso);
if (IS_OSPF6_DEBUG_INTERFACE)
zlog_debug("Interface Event %s: [InterfaceUp]",
zlog_info(
"Interface %s is still in all routers group, rescheduling for SSO",
oi->interface->name);
- thread_add_timer(master, interface_up, oi,
- OSPF6_INTERFACE_SSO_RETRY_INT,
- &oi->thread_sso);
+ event_add_timer(master, interface_up, oi,
+ OSPF6_INTERFACE_SSO_RETRY_INT, &oi->thread_sso);
return;
}
#endif /* __FreeBSD__ */
zlog_info(
"Scheduling %s for sso retry, trial count: %d",
oi->interface->name, oi->sso_try_cnt);
- thread_add_timer(master, interface_up, oi,
- OSPF6_INTERFACE_SSO_RETRY_INT,
- &oi->thread_sso);
+ event_add_timer(master, interface_up, oi,
+ OSPF6_INTERFACE_SSO_RETRY_INT,
+ &oi->thread_sso);
}
return;
}
/* Schedule Hello */
if (!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE)
&& !if_is_loopback(oi->interface)) {
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
}
/* decide next interface state */
ospf6_interface_state_change(OSPF6_INTERFACE_DROTHER, oi);
else {
ospf6_interface_state_change(OSPF6_INTERFACE_WAITING, oi);
- thread_add_timer(master, wait_timer, oi, oi->dead_interval,
- &oi->thread_wait_timer);
+ event_add_timer(master, wait_timer, oi, oi->dead_interval,
+ &oi->thread_wait_timer);
}
}
-void wait_timer(struct thread *thread)
+void wait_timer(struct event *thread)
{
struct ospf6_interface *oi;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi && oi->interface);
if (IS_OSPF6_DEBUG_INTERFACE)
ospf6_interface_state_change(dr_election(oi), oi);
}
-void backup_seen(struct thread *thread)
+void backup_seen(struct event *thread)
{
struct ospf6_interface *oi;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi && oi->interface);
if (IS_OSPF6_DEBUG_INTERFACE)
ospf6_interface_state_change(dr_election(oi), oi);
}
-void neighbor_change(struct thread *thread)
+void neighbor_change(struct event *thread)
{
struct ospf6_interface *oi;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi && oi->interface);
if (IS_OSPF6_DEBUG_INTERFACE)
ospf6_interface_state_change(dr_election(oi), oi);
}
-void interface_down(struct thread *thread)
+void interface_down(struct event *thread)
{
struct ospf6_interface *oi;
struct listnode *node, *nnode;
struct ospf6_neighbor *on;
struct ospf6 *ospf6;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi && oi->interface);
if (IS_OSPF6_DEBUG_INTERFACE)
oi->interface->name);
/* Stop Hellos */
- THREAD_OFF(oi->thread_send_hello);
+ EVENT_OFF(oi->thread_send_hello);
/* Stop trying to set socket options. */
- THREAD_OFF(oi->thread_sso);
+ EVENT_OFF(oi->thread_sso);
/* Cease the HELPER role for all the neighbours
* of this interface.
if (oi->on_write_q) {
listnode_delete(ospf6->oi_write_q, oi);
if (list_isempty(ospf6->oi_write_q))
- thread_cancel(&ospf6->t_write);
+ event_cancel(&ospf6->t_write);
oi->on_write_q = 0;
}
if (use_json) {
timerclear(&res);
- if (thread_is_scheduled(oi->thread_send_lsupdate))
+ if (event_is_scheduled(oi->thread_send_lsupdate))
timersub(&oi->thread_send_lsupdate->u.sands, &now,
&res);
timerstring(&res, duration, sizeof(duration));
duration);
json_object_string_add(
json_obj, "lsUpdateSendThread",
- (thread_is_scheduled(oi->thread_send_lsupdate)
- ? "on"
- : "off"));
+ (event_is_scheduled(oi->thread_send_lsupdate) ? "on"
+ : "off"));
json_arr = json_object_new_array();
for (ALL_LSDB(oi->lsupdate_list, lsa, lsanext))
json_arr);
timerclear(&res);
- if (thread_is_scheduled(oi->thread_send_lsack))
+ if (event_is_scheduled(oi->thread_send_lsack))
timersub(&oi->thread_send_lsack->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
duration);
json_object_string_add(
json_obj, "lsAckSendThread",
- (thread_is_scheduled(oi->thread_send_lsack) ? "on"
- : "off"));
+ (event_is_scheduled(oi->thread_send_lsack) ? "on"
+ : "off"));
json_arr = json_object_new_array();
for (ALL_LSDB(oi->lsack_list, lsa, lsanext))
} else {
timerclear(&res);
- if (thread_is_scheduled(oi->thread_send_lsupdate))
+ if (event_is_scheduled(oi->thread_send_lsupdate))
timersub(&oi->thread_send_lsupdate->u.sands, &now,
&res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for LSUpdate in Time %s [thread %s]\n",
oi->lsupdate_list->count, duration,
- (thread_is_scheduled(oi->thread_send_lsupdate)
- ? "on"
- : "off"));
+ (event_is_scheduled(oi->thread_send_lsupdate) ? "on"
+ : "off"));
for (ALL_LSDB(oi->lsupdate_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
timerclear(&res);
- if (thread_is_scheduled(oi->thread_send_lsack))
+ if (event_is_scheduled(oi->thread_send_lsack))
timersub(&oi->thread_send_lsack->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for LSAck in Time %s [thread %s]\n",
oi->lsack_list->count, duration,
- (thread_is_scheduled(oi->thread_send_lsack) ? "on"
- : "off"));
+ (event_is_scheduled(oi->thread_send_lsack) ? "on"
+ : "off"));
for (ALL_LSDB(oi->lsack_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
}
/* re-establish adjacencies */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
- THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ EVENT_OFF(on->inactivity_timer);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
/* re-establish adjacencies */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
- THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ EVENT_OFF(on->inactivity_timer);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
/*
* If the thread is scheduled, send the new hello now.
*/
- if (thread_is_scheduled(oi->thread_send_hello)) {
- THREAD_OFF(oi->thread_send_hello);
+ if (event_is_scheduled(oi->thread_send_hello)) {
+ EVENT_OFF(oi->thread_send_hello);
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
}
return CMD_SUCCESS;
}
assert(oi);
SET_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE);
- THREAD_OFF(oi->thread_send_hello);
- THREAD_OFF(oi->thread_sso);
+ EVENT_OFF(oi->thread_send_hello);
+ EVENT_OFF(oi->thread_sso);
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
- THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ EVENT_OFF(on->inactivity_timer);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
assert(oi);
UNSET_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE);
- THREAD_OFF(oi->thread_send_hello);
- THREAD_OFF(oi->thread_sso);
+ EVENT_OFF(oi->thread_send_hello);
+ EVENT_OFF(oi->thread_sso);
/* don't send hellos over loopback interface */
if (!if_is_loopback(oi->interface))
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
return CMD_SUCCESS;
}
}
/* Reset the interface */
- thread_execute(master, interface_down, oi, 0);
- thread_execute(master, interface_up, oi, 0);
+ event_execute(master, interface_down, oi, 0);
+ event_execute(master, interface_up, oi, 0);
return CMD_SUCCESS;
}
oi->type = type;
/* Reset the interface */
- thread_execute(master, interface_down, oi, 0);
- thread_execute(master, interface_up, oi, 0);
+ event_execute(master, interface_down, oi, 0);
+ event_execute(master, interface_up, oi, 0);
return CMD_SUCCESS;
}
zlog_debug("Interface %s: clear by reset", ifp->name);
/* Reset the interface */
- thread_execute(master, interface_down, oi, 0);
- thread_execute(master, interface_up, oi, 0);
+ event_execute(master, interface_down, oi, 0);
+ event_execute(master, interface_up, oi, 0);
}
/* Clear interface */
/* Interface socket setting trial counter, resets on success */
uint8_t sso_try_cnt;
- struct thread *thread_sso;
+ struct event *thread_sso;
/* OSPF6 Interface flag */
char flag;
struct ospf6_lsdb *lsack_list;
/* Ongoing Tasks */
- struct thread *thread_send_hello;
- struct thread *thread_send_lsupdate;
- struct thread *thread_send_lsack;
+ struct event *thread_send_hello;
+ struct event *thread_send_lsupdate;
+ struct event *thread_send_lsack;
- struct thread *thread_network_lsa;
- struct thread *thread_link_lsa;
- struct thread *thread_intra_prefix_lsa;
- struct thread *thread_as_extern_lsa;
- struct thread *thread_wait_timer;
+ struct event *thread_network_lsa;
+ struct event *thread_link_lsa;
+ struct event *thread_intra_prefix_lsa;
+ struct event *thread_as_extern_lsa;
+ struct event *thread_wait_timer;
struct ospf6_route_table *route_connected;
ospf6_interface_get_global_address(struct interface *ifp);
/* interface event */
-extern void interface_up(struct thread *thread);
-extern void interface_down(struct thread *thread);
-extern void wait_timer(struct thread *thread);
-extern void backup_seen(struct thread *thread);
-extern void neighbor_change(struct thread *thread);
+extern void interface_up(struct event *thread);
+extern void interface_down(struct event *thread);
+extern void wait_timer(struct event *thread);
+extern void backup_seen(struct event *thread);
+extern void neighbor_change(struct event *thread);
extern void ospf6_interface_init(void);
extern void ospf6_interface_clear(struct interface *ifp);
#include "log.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "if.h"
#include "prefix.h"
return OSPF6_NOT_STUB_ROUTER;
}
-void ospf6_router_lsa_originate(struct thread *thread)
+void ospf6_router_lsa_originate(struct event *thread)
{
struct ospf6_area *oa;
uint32_t router;
int count;
- oa = (struct ospf6_area *)THREAD_ARG(thread);
+ oa = (struct ospf6_area *)EVENT_ARG(thread);
if (oa->ospf6->gr_info.restart_in_progress) {
if (IS_DEBUG_OSPF6_GR)
return 0;
}
-void ospf6_network_lsa_originate(struct thread *thread)
+void ospf6_network_lsa_originate(struct event *thread)
{
struct ospf6_interface *oi;
struct listnode *i;
uint16_t type;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
/* The interface must be enabled until here. A Network-LSA of a
disabled interface (but was once enabled) should be flushed
return 0;
}
-void ospf6_link_lsa_originate(struct thread *thread)
+void ospf6_link_lsa_originate(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6_route *route;
struct ospf6_prefix *op;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi->area);
return 0;
}
-void ospf6_intra_prefix_lsa_originate_stub(struct thread *thread)
+void ospf6_intra_prefix_lsa_originate_stub(struct event *thread)
{
struct ospf6_area *oa;
struct ospf6_route_table *route_advertise;
int ls_id = 0;
- oa = (struct ospf6_area *)THREAD_ARG(thread);
+ oa = (struct ospf6_area *)EVENT_ARG(thread);
if (oa->ospf6->gr_info.restart_in_progress) {
if (IS_DEBUG_OSPF6_GR)
}
-void ospf6_intra_prefix_lsa_originate_transit(struct thread *thread)
+void ospf6_intra_prefix_lsa_originate_transit(struct event *thread)
{
struct ospf6_interface *oi;
char *start, *end, *current;
uint16_t type;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
assert(oi->area);
#define OSPF6_ROUTER_LSA_SCHEDULE(oa) \
do { \
if (CHECK_FLAG((oa)->flag, OSPF6_AREA_ENABLE)) \
- thread_add_event(master, ospf6_router_lsa_originate, \
- oa, 0, &(oa)->thread_router_lsa); \
+ event_add_event(master, ospf6_router_lsa_originate, \
+ oa, 0, &(oa)->thread_router_lsa); \
} while (0)
#define OSPF6_NETWORK_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_network_lsa_originate, \
- oi, 0, &(oi)->thread_network_lsa); \
+ event_add_event(master, ospf6_network_lsa_originate, \
+ oi, 0, &(oi)->thread_network_lsa); \
} while (0)
#define OSPF6_LINK_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_link_lsa_originate, oi, \
- 0, &(oi)->thread_link_lsa); \
+ event_add_event(master, ospf6_link_lsa_originate, oi, \
+ 0, &(oi)->thread_link_lsa); \
} while (0)
#define OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(oa) \
do { \
if (CHECK_FLAG((oa)->flag, OSPF6_AREA_ENABLE)) \
- thread_add_event( \
+ event_add_event( \
master, ospf6_intra_prefix_lsa_originate_stub, \
oa, 0, &(oa)->thread_intra_prefix_lsa); \
} while (0)
#define OSPF6_INTRA_PREFIX_LSA_SCHEDULE_TRANSIT(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event( \
+ event_add_event( \
master, \
ospf6_intra_prefix_lsa_originate_transit, oi, \
0, &(oi)->thread_intra_prefix_lsa); \
#define OSPF6_AS_EXTERN_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_orig_as_external_lsa, \
- oi, 0, &(oi)->thread_as_extern_lsa); \
+ event_add_event(master, ospf6_orig_as_external_lsa, \
+ oi, 0, &(oi)->thread_as_extern_lsa); \
} while (0)
#define OSPF6_ROUTER_LSA_EXECUTE(oa) \
do { \
if (CHECK_FLAG((oa)->flag, OSPF6_AREA_ENABLE)) \
- thread_execute(master, ospf6_router_lsa_originate, oa, \
- 0); \
+ event_execute(master, ospf6_router_lsa_originate, oa, \
+ 0); \
} while (0)
#define OSPF6_NETWORK_LSA_EXECUTE(oi) \
do { \
- THREAD_OFF((oi)->thread_network_lsa); \
- thread_execute(master, ospf6_network_lsa_originate, oi, 0); \
+ EVENT_OFF((oi)->thread_network_lsa); \
+ event_execute(master, ospf6_network_lsa_originate, oi, 0); \
} while (0)
#define OSPF6_LINK_LSA_EXECUTE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_execute(master, ospf6_link_lsa_originate, oi, \
- 0); \
+ event_execute(master, ospf6_link_lsa_originate, oi, \
+ 0); \
} while (0)
#define OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi) \
do { \
- THREAD_OFF((oi)->thread_intra_prefix_lsa); \
- thread_execute(master, \
- ospf6_intra_prefix_lsa_originate_transit, oi, \
- 0); \
+ EVENT_OFF((oi)->thread_intra_prefix_lsa); \
+ event_execute(master, \
+ ospf6_intra_prefix_lsa_originate_transit, oi, \
+ 0); \
} while (0)
#define OSPF6_AS_EXTERN_LSA_EXECUTE(oi) \
do { \
- THREAD_OFF((oi)->thread_as_extern_lsa); \
- thread_execute(master, ospf6_orig_as_external_lsa, oi, 0); \
+ EVENT_OFF((oi)->thread_as_extern_lsa); \
+ event_execute(master, ospf6_orig_as_external_lsa, oi, 0); \
} while (0)
/* Function Prototypes */
struct ospf6_lsa *lsa);
extern int ospf6_router_is_stub_router(struct ospf6_lsa *lsa);
-extern void ospf6_router_lsa_originate(struct thread *thread);
-extern void ospf6_network_lsa_originate(struct thread *thread);
-extern void ospf6_link_lsa_originate(struct thread *thread);
-extern void ospf6_intra_prefix_lsa_originate_transit(struct thread *thread);
-extern void ospf6_intra_prefix_lsa_originate_stub(struct thread *thread);
+extern void ospf6_router_lsa_originate(struct event *thread);
+extern void ospf6_network_lsa_originate(struct event *thread);
+extern void ospf6_link_lsa_originate(struct event *thread);
+extern void ospf6_intra_prefix_lsa_originate_transit(struct event *thread);
+extern void ospf6_intra_prefix_lsa_originate_stub(struct event *thread);
extern void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa);
extern void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa);
-extern void ospf6_orig_as_external_lsa(struct thread *thread);
+extern void ospf6_orig_as_external_lsa(struct event *thread);
extern void ospf6_intra_route_calculation(struct ospf6_area *oa);
extern void ospf6_intra_brouter_calculation(struct ospf6_area *oa);
extern void ospf6_intra_prefix_route_ecmp_path(struct ospf6_area *oa,
#include "vty.h"
#include "command.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "checksum.h"
#include "frrstr.h"
if (IS_OSPF6_DEBUG_LSA_TYPE(lsa->header->type))
zlog_debug("LSA: Premature aging: %s", lsa->name);
- THREAD_OFF(lsa->expire);
- THREAD_OFF(lsa->refresh);
+ EVENT_OFF(lsa->expire);
+ EVENT_OFF(lsa->refresh);
/*
* We clear the LSA from the neighbor retx lists now because it
ospf6_flood_clear(lsa);
lsa->header->age = htons(OSPF_LSA_MAXAGE);
- thread_execute(master, ospf6_lsa_expire, lsa, 0);
+ event_execute(master, ospf6_lsa_expire, lsa, 0);
}
/* check which is more recent. if a is more recent, return -1;
assert(lsa->lock == 0);
/* cancel threads */
- THREAD_OFF(lsa->expire);
- THREAD_OFF(lsa->refresh);
+ EVENT_OFF(lsa->expire);
+ EVENT_OFF(lsa->refresh);
/* do free */
XFREE(MTYPE_OSPF6_LSA_HEADER, lsa->header);
/* ospf6 lsa expiry */
-void ospf6_lsa_expire(struct thread *thread)
+void ospf6_lsa_expire(struct event *thread)
{
struct ospf6_lsa *lsa;
struct ospf6 *ospf6;
- lsa = (struct ospf6_lsa *)THREAD_ARG(thread);
+ lsa = (struct ospf6_lsa *)EVENT_ARG(thread);
assert(lsa && lsa->header);
assert(OSPF6_LSA_IS_MAXAGE(lsa));
assert(!lsa->refresh);
- lsa->expire = (struct thread *)NULL;
+ lsa->expire = (struct event *)NULL;
if (IS_OSPF6_DEBUG_LSA_TYPE(lsa->header->type)) {
zlog_debug("LSA Expire:");
ospf6_maxage_remove(ospf6);
}
-void ospf6_lsa_refresh(struct thread *thread)
+void ospf6_lsa_refresh(struct event *thread)
{
struct ospf6_lsa *old, *self, *new;
struct ospf6_lsdb *lsdb_self;
- old = (struct ospf6_lsa *)THREAD_ARG(thread);
+ old = (struct ospf6_lsa *)EVENT_ARG(thread);
assert(old && old->header);
- old->refresh = (struct thread *)NULL;
+ old->refresh = (struct event *)NULL;
lsdb_self = ospf6_get_scoped_lsdb_self(old);
self = ospf6_lsdb_lookup(old->header->type, old->header->id,
new = ospf6_lsa_create(self->header);
new->lsdb = old->lsdb;
- thread_add_timer(master, ospf6_lsa_refresh, new, OSPF_LS_REFRESH_TIME,
- &new->refresh);
+ event_add_timer(master, ospf6_lsa_refresh, new, OSPF_LS_REFRESH_TIME,
+ &new->refresh);
/* store it in the LSDB for self-originated LSAs */
ospf6_lsdb_add(ospf6_lsa_copy(new), lsdb_self);
struct timeval received; /* used by MinLSArrival check */
struct timeval installed;
- struct thread *expire;
- struct thread *refresh; /* For self-originated LSA */
+ struct event *expire;
+ struct event *refresh; /* For self-originated LSA */
int retrans_count;
extern struct ospf6_lsa *ospf6_lsa_lock(struct ospf6_lsa *lsa);
extern struct ospf6_lsa *ospf6_lsa_unlock(struct ospf6_lsa *lsa);
-extern void ospf6_lsa_expire(struct thread *thread);
-extern void ospf6_lsa_refresh(struct thread *thread);
+extern void ospf6_lsa_expire(struct event *thread);
+extern void ospf6_lsa_refresh(struct event *thread);
extern unsigned short ospf6_lsa_checksum(struct ospf6_lsa_header *lsah);
extern int ospf6_lsa_checksum_valid(struct ospf6_lsa_header *lsah);
htonl(OSPF_MAX_SEQUENCE_NUMBER + 1);
ospf6_lsa_checksum(lsa->header);
- THREAD_OFF(lsa->refresh);
- thread_execute(master, ospf6_lsa_refresh, lsa, 0);
+ EVENT_OFF(lsa->refresh);
+ event_execute(master, ospf6_lsa_refresh, lsa, 0);
} else {
zlog_debug("calling ospf6_lsdb_remove %s", lsa->name);
ospf6_lsdb_remove(lsa, lsdb);
#include <stdlib.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "command.h"
#include "vty.h"
struct option longopts[] = {{0}};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
static void __attribute__((noreturn)) ospf6_exit(int status)
{
#include "log.h"
#include "vty.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "lib_errors.h"
#include "checksum.h"
oi->hello_in++;
/* Execute neighbor events */
- thread_execute(master, hello_received, on, 0);
+ event_execute(master, hello_received, on, 0);
if (twoway)
- thread_execute(master, twoway_received, on, 0);
+ event_execute(master, twoway_received, on, 0);
else {
if (OSPF6_GR_IS_ACTIVE_HELPER(on)) {
if (IS_DEBUG_OSPF6_GR)
* receives one_way hellow when it acts as HELPER for
* that specific neighbor.
*/
- thread_execute(master, oneway_received, on, 0);
+ event_execute(master, oneway_received, on, 0);
}
}
/* Schedule interface events */
if (backupseen)
- thread_add_event(master, backup_seen, oi, 0, NULL);
+ event_add_event(master, backup_seen, oi, 0, NULL);
if (neighborchange)
- thread_add_event(master, neighbor_change, oi, 0, NULL);
+ event_add_event(master, neighbor_change, oi, 0, NULL);
if (neighbor_ifindex_change && on->state == OSPF6_NEIGHBOR_FULL)
OSPF6_ROUTER_LSA_SCHEDULE(oi->area);
return;
case OSPF6_NEIGHBOR_INIT:
- thread_execute(master, twoway_received, on, 0);
+ event_execute(master, twoway_received, on, 0);
if (on->state != OSPF6_NEIGHBOR_EXSTART) {
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV_HDR))
zlog_debug(
&& !CHECK_FLAG(dbdesc->bits, OSPF6_DBDESC_IBIT)
&& ntohl(dbdesc->seqnum) == on->dbdesc_seqnum) {
/* execute NegotiationDone */
- thread_execute(master, negotiation_done, on, 0);
+ event_execute(master, negotiation_done, on, 0);
/* Record neighbor options */
memcpy(on->options, dbdesc->options,
zlog_warn(
"DbDesc recv: Master/Slave bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
if (CHECK_FLAG(dbdesc->bits, OSPF6_DBDESC_IBIT)) {
zlog_warn("DbDesc recv: Initialize bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
if (memcmp(on->options, dbdesc->options, sizeof(on->options))) {
zlog_warn("DbDesc recv: Option field mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
"DbDesc recv: Sequence number mismatch Nbr %s (received %#lx, %#lx expected)",
on->name, (unsigned long)ntohl(dbdesc->seqnum),
(unsigned long)on->dbdesc_seqnum);
- thread_add_event(master, seqnumber_mismatch, on, 0,
+ event_add_event(master, seqnumber_mismatch, on, 0,
NULL);
return;
}
zlog_warn(
"DbDesc recv: Not duplicate dbdesc in state %s Nbr %s",
ospf6_neighbor_state_str[on->state], on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0, NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0, NULL);
return;
default:
zlog_debug(
"SeqNumMismatch (E-bit mismatch), discard");
ospf6_lsa_delete(his);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
/* schedule send lsreq */
if (on->request_list->count)
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
- THREAD_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
/* More bit check */
if (!CHECK_FLAG(dbdesc->bits, OSPF6_DBDESC_MBIT)
&& !CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT))
- thread_add_event(master, exchange_done, on, 0,
- &on->thread_exchange_done);
+ event_add_event(master, exchange_done, on, 0,
+ &on->thread_exchange_done);
else {
- thread_add_event(master, ospf6_dbdesc_send_newone, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send_newone, on, 0,
+ &on->thread_send_dbdesc);
}
/* save last received dbdesc */
return;
case OSPF6_NEIGHBOR_INIT:
- thread_execute(master, twoway_received, on, 0);
+ event_execute(master, twoway_received, on, 0);
if (on->state != OSPF6_NEIGHBOR_EXSTART) {
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV_HDR))
zlog_debug(
on->dbdesc_seqnum = ntohl(dbdesc->seqnum);
/* schedule NegotiationDone */
- thread_execute(master, negotiation_done, on, 0);
+ event_execute(master, negotiation_done, on, 0);
/* Record neighbor options */
memcpy(on->options, dbdesc->options,
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV_HDR))
zlog_debug(
"Duplicated dbdesc causes retransmit");
- THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
return;
}
zlog_warn(
"DbDesc slave recv: Master/Slave bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
zlog_warn(
"DbDesc slave recv: Initialize bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
zlog_warn(
"DbDesc slave recv: Option field mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
"DbDesc slave recv: Sequence number mismatch Nbr %s (received %#lx, %#lx expected)",
on->name, (unsigned long)ntohl(dbdesc->seqnum),
(unsigned long)on->dbdesc_seqnum + 1);
- thread_add_event(master, seqnumber_mismatch, on, 0,
+ event_add_event(master, seqnumber_mismatch, on, 0,
NULL);
return;
}
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV_HDR))
zlog_debug(
"Duplicated dbdesc causes retransmit");
- THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
return;
}
zlog_warn(
"DbDesc slave recv: Not duplicate dbdesc in state %s Nbr %s",
ospf6_neighbor_state_str[on->state], on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0, NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0, NULL);
return;
default:
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV))
zlog_debug("E-bit mismatch with LSA Headers");
ospf6_lsa_delete(his);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
/* schedule send lsreq */
if (on->request_list->count)
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
- THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send_newone, on, 0,
- &on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send_newone, on, 0,
+ &on->thread_send_dbdesc);
/* save last received dbdesc */
memcpy(&on->dbdesc_last, dbdesc, sizeof(struct ospf6_dbdesc));
"Can't find requested lsa [%s Id:%pI4 Adv:%pI4] send badLSReq",
ospf6_lstype_name(e->type), &e->id,
&e->adv_router);
- thread_add_event(master, bad_lsreq, on, 0, NULL);
+ event_add_event(master, bad_lsreq, on, 0, NULL);
return;
}
assert(p == OSPF6_MESSAGE_END(oh));
/* schedule send lsupdate */
- THREAD_OFF(on->thread_send_lsupdate);
- thread_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
- &on->thread_send_lsupdate);
+ EVENT_OFF(on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
+ &on->thread_send_lsupdate);
}
/* Verify, that the specified memory area contains exactly N valid IPv6
return OSPF6_READ_CONTINUE;
}
-void ospf6_receive(struct thread *thread)
+void ospf6_receive(struct event *thread)
{
int sockfd;
struct ospf6 *ospf6;
int count = 0;
/* add next read thread */
- ospf6 = THREAD_ARG(thread);
- sockfd = THREAD_FD(thread);
+ ospf6 = EVENT_ARG(thread);
+ sockfd = EVENT_FD(thread);
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
while (count < ospf6->write_oi_count) {
count++;
return length;
}
-static void ospf6_write(struct thread *thread)
+static void ospf6_write(struct event *thread)
{
- struct ospf6 *ospf6 = THREAD_ARG(thread);
+ struct ospf6 *ospf6 = EVENT_ARG(thread);
struct ospf6_interface *oi;
struct ospf6_header *oh;
struct ospf6_packet *op;
/* If packets still remain in queue, call write thread. */
if (!list_isempty(ospf6->oi_write_q))
- thread_add_write(master, ospf6_write, ospf6, ospf6->fd,
- &ospf6->t_write);
+ event_add_write(master, ospf6_write, ospf6, ospf6->fd,
+ &ospf6->t_write);
}
-void ospf6_hello_send(struct thread *thread)
+void ospf6_hello_send(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
if (oi->state <= OSPF6_INTERFACE_DOWN) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND_HDR))
ospf6_packet_add_top(oi, op);
/* set next thread */
- thread_add_timer(master, ospf6_hello_send, oi, oi->hello_interval,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, oi->hello_interval,
+ &oi->thread_send_hello);
OSPF6_MESSAGE_WRITE_ON(oi);
}
return length;
}
-void ospf6_dbdesc_send(struct thread *thread)
+void ospf6_dbdesc_send(struct event *thread)
{
struct ospf6_neighbor *on;
uint16_t length = OSPF6_HEADER_SIZE;
struct ospf6_packet *op;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
if (on->state < OSPF6_NEIGHBOR_EXSTART) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_DBDESC, SEND))
/* set next thread if master */
if (CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT))
- thread_add_timer(master, ospf6_dbdesc_send, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_dbdesc);
+ event_add_timer(master, ospf6_dbdesc_send, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_dbdesc);
op = ospf6_packet_new(on->ospf6_if->ifmtu);
ospf6_make_header(OSPF6_MESSAGE_TYPE_DBDESC, on->ospf6_if, op->s);
OSPF6_MESSAGE_WRITE_ON(on->ospf6_if);
}
-void ospf6_dbdesc_send_newone(struct thread *thread)
+void ospf6_dbdesc_send_newone(struct event *thread)
{
struct ospf6_neighbor *on;
struct ospf6_lsa *lsa, *lsanext;
unsigned int size = 0;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
ospf6_lsdb_remove_all(on->dbdesc_list);
/* move LSAs from summary_list to dbdesc_list (within neighbor
if (!CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT) && /* Slave */
!CHECK_FLAG(on->dbdesc_last.bits, OSPF6_DBDESC_MBIT)
&& !CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT))
- thread_add_event(master, exchange_done, on, 0,
- &on->thread_exchange_done);
+ event_add_event(master, exchange_done, on, 0,
+ &on->thread_exchange_done);
- thread_execute(master, ospf6_dbdesc_send, on, 0);
+ event_execute(master, ospf6_dbdesc_send, on, 0);
}
static uint16_t ospf6_make_lsreq(struct ospf6_neighbor *on, struct stream *s)
return length;
}
-void ospf6_lsreq_send(struct thread *thread)
+void ospf6_lsreq_send(struct event *thread)
{
struct ospf6_neighbor *on;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
/* LSReq will be sent only in ExStart or Loading */
if (on->state != OSPF6_NEIGHBOR_EXCHANGE
/* schedule loading_done if request list is empty */
if (on->request_list->count == 0) {
- thread_add_event(master, loading_done, on, 0, NULL);
+ event_add_event(master, loading_done, on, 0, NULL);
return;
}
/* set next thread */
if (on->request_list->count != 0) {
- thread_add_timer(master, ospf6_lsreq_send, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_lsreq);
+ event_add_timer(master, ospf6_lsreq_send, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsreq);
}
}
listnode_add(oi->area->ospf6->oi_write_q, oi);
oi->on_write_q = 1;
}
- thread_execute(master, ospf6_write, oi->area->ospf6, 0);
+ event_execute(master, ospf6_write, oi->area->ospf6, 0);
} else
OSPF6_MESSAGE_WRITE_ON(oi);
}
return length;
}
-void ospf6_lsupdate_send_neighbor(struct thread *thread)
+void ospf6_lsupdate_send_neighbor(struct event *thread)
{
struct ospf6_neighbor *on;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
int lsa_cnt = 0;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_LSUPDATE, SEND_HDR))
zlog_debug("LSUpdate to neighbor %s", on->name);
ospf6_packet_free(op);
if (on->lsupdate_list->count != 0) {
- thread_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
- &on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
+ &on->thread_send_lsupdate);
} else if (on->retrans_list->count != 0) {
- thread_add_timer(master, ospf6_lsupdate_send_neighbor, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_lsupdate);
+ event_add_timer(master, ospf6_lsupdate_send_neighbor, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsupdate);
}
}
return length;
}
-void ospf6_lsupdate_send_interface(struct thread *thread)
+void ospf6_lsupdate_send_interface(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
int lsa_cnt = 0;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
if (oi->state <= OSPF6_INTERFACE_WAITING) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_LSUPDATE,
ospf6_packet_free(op);
if (oi->lsupdate_list->count > 0) {
- thread_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
- &oi->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
+ &oi->thread_send_lsupdate);
}
}
-void ospf6_lsack_send_neighbor(struct thread *thread)
+void ospf6_lsack_send_neighbor(struct event *thread)
{
struct ospf6_neighbor *on;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
if (on->state < OSPF6_NEIGHBOR_EXCHANGE) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_LSACK, SEND_HDR))
OSPF6_MESSAGE_WRITE_ON(on->ospf6_if);
if (on->lsack_list->count > 0)
- thread_add_event(master, ospf6_lsack_send_neighbor, on, 0,
- &on->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, on, 0,
+ &on->thread_send_lsack);
}
static uint16_t ospf6_make_lsack_interface(struct ospf6_interface *oi,
> ospf6_packet_max(oi)) {
/* if we run out of packet size/space here,
better to try again soon. */
- THREAD_OFF(oi->thread_send_lsack);
- thread_add_event(master, ospf6_lsack_send_interface, oi,
- 0, &oi->thread_send_lsack);
+ EVENT_OFF(oi->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_interface, oi,
+ 0, &oi->thread_send_lsack);
ospf6_lsa_unlock(lsa);
if (lsanext)
return length;
}
-void ospf6_lsack_send_interface(struct thread *thread)
+void ospf6_lsack_send_interface(struct event *thread)
{
struct ospf6_interface *oi;
struct ospf6_packet *op;
uint16_t length = OSPF6_HEADER_SIZE;
- oi = (struct ospf6_interface *)THREAD_ARG(thread);
+ oi = (struct ospf6_interface *)EVENT_ARG(thread);
if (oi->state <= OSPF6_INTERFACE_WAITING) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_LSACK, SEND_HDR))
OSPF6_MESSAGE_WRITE_ON(oi);
if (oi->lsack_list->count > 0)
- thread_add_event(master, ospf6_lsack_send_interface, oi, 0,
- &oi->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_interface, oi, 0,
+ &oi->thread_send_lsack);
}
/* Commands */
extern int ospf6_iobuf_size(unsigned int size);
extern void ospf6_message_terminate(void);
-extern void ospf6_receive(struct thread *thread);
-
-extern void ospf6_hello_send(struct thread *thread);
-extern void ospf6_dbdesc_send(struct thread *thread);
-extern void ospf6_dbdesc_send_newone(struct thread *thread);
-extern void ospf6_lsreq_send(struct thread *thread);
-extern void ospf6_lsupdate_send_interface(struct thread *thread);
-extern void ospf6_lsupdate_send_neighbor(struct thread *thread);
-extern void ospf6_lsack_send_interface(struct thread *thread);
-extern void ospf6_lsack_send_neighbor(struct thread *thread);
+extern void ospf6_receive(struct event *thread);
+
+extern void ospf6_hello_send(struct event *thread);
+extern void ospf6_dbdesc_send(struct event *thread);
+extern void ospf6_dbdesc_send_newone(struct event *thread);
+extern void ospf6_lsreq_send(struct event *thread);
+extern void ospf6_lsupdate_send_interface(struct event *thread);
+extern void ospf6_lsupdate_send_neighbor(struct event *thread);
+extern void ospf6_lsack_send_interface(struct event *thread);
+extern void ospf6_lsack_send_neighbor(struct event *thread);
extern int config_write_ospf6_debug_message(struct vty *);
extern void install_element_ospf6_debug_message(void);
#include "log.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "command.h"
ospf6_lsdb_delete(on->lsupdate_list);
ospf6_lsdb_delete(on->lsack_list);
- THREAD_OFF(on->inactivity_timer);
+ EVENT_OFF(on->inactivity_timer);
- THREAD_OFF(on->last_dbdesc_release_timer);
+ EVENT_OFF(on->last_dbdesc_release_timer);
- THREAD_OFF(on->thread_send_dbdesc);
- THREAD_OFF(on->thread_send_lsreq);
- THREAD_OFF(on->thread_send_lsupdate);
- THREAD_OFF(on->thread_send_lsack);
- THREAD_OFF(on->thread_exchange_done);
- THREAD_OFF(on->thread_adj_ok);
+ EVENT_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_lsreq);
+ EVENT_OFF(on->thread_send_lsupdate);
+ EVENT_OFF(on->thread_send_lsack);
+ EVENT_OFF(on->thread_exchange_done);
+ EVENT_OFF(on->thread_adj_ok);
- THREAD_OFF(on->gr_helper_info.t_grace_timer);
+ EVENT_OFF(on->gr_helper_info.t_grace_timer);
bfd_sess_free(&on->bfd_session);
XFREE(MTYPE_OSPF6_NEIGHBOR, on);
return 0;
}
-void hello_received(struct thread *thread)
+void hello_received(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
zlog_debug("Neighbor Event %s: *HelloReceived*", on->name);
/* reset Inactivity Timer */
- THREAD_OFF(on->inactivity_timer);
- thread_add_timer(master, inactivity_timer, on,
- on->ospf6_if->dead_interval, &on->inactivity_timer);
+ EVENT_OFF(on->inactivity_timer);
+ event_add_timer(master, inactivity_timer, on,
+ on->ospf6_if->dead_interval, &on->inactivity_timer);
if (on->state <= OSPF6_NEIGHBOR_DOWN)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_INIT, on,
OSPF6_NEIGHBOR_EVENT_HELLO_RCVD);
}
-void twoway_received(struct thread *thread)
+void twoway_received(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state > OSPF6_NEIGHBOR_INIT)
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
zlog_debug("Neighbor Event %s: *2Way-Received*", on->name);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
if (!need_adjacency(on)) {
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_TWOWAY, on,
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT);
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_IBIT);
- THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
-void negotiation_done(struct thread *thread)
+void negotiation_done(struct event *thread)
{
struct ospf6_neighbor *on;
struct ospf6_lsa *lsa, *lsanext;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state != OSPF6_NEIGHBOR_EXSTART)
OSPF6_NEIGHBOR_EVENT_NEGOTIATION_DONE);
}
-static void ospf6_neighbor_last_dbdesc_release(struct thread *thread)
+static void ospf6_neighbor_last_dbdesc_release(struct event *thread)
{
- struct ospf6_neighbor *on = THREAD_ARG(thread);
+ struct ospf6_neighbor *on = EVENT_ARG(thread);
assert(on);
memset(&on->dbdesc_last, 0, sizeof(struct ospf6_dbdesc));
}
-void exchange_done(struct thread *thread)
+void exchange_done(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state != OSPF6_NEIGHBOR_EXCHANGE)
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
zlog_debug("Neighbor Event %s: *ExchangeDone*", on->name);
- THREAD_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
ospf6_lsdb_remove_all(on->dbdesc_list);
/* RFC 2328 (10.8): Release the last dbdesc after dead_interval */
if (!CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT)) {
- THREAD_OFF(on->last_dbdesc_release_timer);
- thread_add_timer(master, ospf6_neighbor_last_dbdesc_release, on,
- on->ospf6_if->dead_interval,
- &on->last_dbdesc_release_timer);
+ EVENT_OFF(on->last_dbdesc_release_timer);
+ event_add_timer(master, ospf6_neighbor_last_dbdesc_release, on,
+ on->ospf6_if->dead_interval,
+ &on->last_dbdesc_release_timer);
}
if (on->request_list->count == 0)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_LOADING, on,
OSPF6_NEIGHBOR_EVENT_EXCHANGE_DONE);
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
}
}
if ((on->state == OSPF6_NEIGHBOR_LOADING)
|| (on->state == OSPF6_NEIGHBOR_EXCHANGE)) {
if (on->request_list->count == 0)
- thread_add_event(master, loading_done, on, 0, NULL);
+ event_add_event(master, loading_done, on, 0, NULL);
else if (on->last_ls_req == NULL) {
- THREAD_OFF(on->thread_send_lsreq);
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ EVENT_OFF(on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
}
}
}
-void loading_done(struct thread *thread)
+void loading_done(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state != OSPF6_NEIGHBOR_LOADING)
OSPF6_NEIGHBOR_EVENT_LOADING_DONE);
}
-void adj_ok(struct thread *thread)
+void adj_ok(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT);
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_IBIT);
- THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
} else if (on->state >= OSPF6_NEIGHBOR_EXSTART && !need_adjacency(on)) {
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_TWOWAY, on,
}
}
-void seqnumber_mismatch(struct thread *thread)
+void seqnumber_mismatch(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state < OSPF6_NEIGHBOR_EXCHANGE)
ospf6_neighbor_clear_ls_lists(on);
- THREAD_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
on->dbdesc_seqnum++; /* Incr seqnum as per RFC2328, sec 10.3 */
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
-void bad_lsreq(struct thread *thread)
+void bad_lsreq(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state < OSPF6_NEIGHBOR_EXCHANGE)
ospf6_neighbor_clear_ls_lists(on);
- THREAD_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_dbdesc);
on->dbdesc_seqnum++; /* Incr seqnum as per RFC2328, sec 10.3 */
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
-
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
-void oneway_received(struct thread *thread)
+void oneway_received(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (on->state < OSPF6_NEIGHBOR_TWOWAY)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_INIT, on,
OSPF6_NEIGHBOR_EVENT_ONEWAY_RCVD);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
ospf6_neighbor_clear_ls_lists(on);
- THREAD_OFF(on->thread_send_dbdesc);
- THREAD_OFF(on->thread_send_lsreq);
- THREAD_OFF(on->thread_send_lsupdate);
- THREAD_OFF(on->thread_send_lsack);
- THREAD_OFF(on->thread_exchange_done);
- THREAD_OFF(on->thread_adj_ok);
+ EVENT_OFF(on->thread_send_dbdesc);
+ EVENT_OFF(on->thread_send_lsreq);
+ EVENT_OFF(on->thread_send_lsupdate);
+ EVENT_OFF(on->thread_send_lsack);
+ EVENT_OFF(on->thread_exchange_done);
+ EVENT_OFF(on->thread_adj_ok);
}
-void inactivity_timer(struct thread *thread)
+void inactivity_timer(struct event *thread)
{
struct ospf6_neighbor *on;
- on = (struct ospf6_neighbor *)THREAD_ARG(thread);
+ on = (struct ospf6_neighbor *)EVENT_ARG(thread);
assert(on);
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
ospf6_neighbor_state_change(
OSPF6_NEIGHBOR_DOWN, on,
OSPF6_NEIGHBOR_EVENT_INACTIVITY_TIMER);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0,
- NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
listnode_delete(on->ospf6_if->neighbor_list, on);
ospf6_neighbor_delete(on);
"%s, Acting as HELPER for this neighbour, So restart the dead timer.",
__PRETTY_FUNCTION__);
- thread_add_timer(master, inactivity_timer, on,
- on->ospf6_if->dead_interval,
- &on->inactivity_timer);
+ event_add_timer(master, inactivity_timer, on,
+ on->ospf6_if->dead_interval,
+ &on->inactivity_timer);
}
}
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_dbdesc))
+ if (event_is_scheduled(on->thread_send_dbdesc))
timersub(&on->thread_send_dbdesc->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
json_object_int_add(json_neighbor, "pendingLsaDbDescCount",
duration);
json_object_string_add(
json_neighbor, "dbDescSendThread",
- (thread_is_scheduled(on->thread_send_dbdesc) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_dbdesc) ? "on"
+ : "off"));
json_array = json_object_new_array();
for (ALL_LSDB(on->dbdesc_list, lsa, lsanext))
json_object_array_add(
json_array);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsreq))
+ if (event_is_scheduled(on->thread_send_lsreq))
timersub(&on->thread_send_lsreq->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
json_object_int_add(json_neighbor, "pendingLsaLsReqCount",
duration);
json_object_string_add(
json_neighbor, "lsReqSendThread",
- (thread_is_scheduled(on->thread_send_lsreq) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsreq) ? "on"
+ : "off"));
json_array = json_object_new_array();
for (ALL_LSDB(on->request_list, lsa, lsanext))
json_object_array_add(
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsupdate))
+ if (event_is_scheduled(on->thread_send_lsupdate))
timersub(&on->thread_send_lsupdate->u.sands, &now,
&res);
timerstring(&res, duration, sizeof(duration));
duration);
json_object_string_add(
json_neighbor, "lsUpdateSendThread",
- (thread_is_scheduled(on->thread_send_lsupdate)
- ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsupdate) ? "on"
+ : "off"));
json_array = json_object_new_array();
for (ALL_LSDB(on->lsupdate_list, lsa, lsanext))
json_object_array_add(
json_array);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsack))
+ if (event_is_scheduled(on->thread_send_lsack))
timersub(&on->thread_send_lsack->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
json_object_int_add(json_neighbor, "pendingLsaLsAckCount",
duration);
json_object_string_add(
json_neighbor, "lsAckSendThread",
- (thread_is_scheduled(on->thread_send_lsack) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsack) ? "on"
+ : "off"));
json_array = json_object_new_array();
for (ALL_LSDB(on->lsack_list, lsa, lsanext))
json_object_array_add(
vty_out(vty, " %s\n", lsa->name);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_dbdesc))
+ if (event_is_scheduled(on->thread_send_dbdesc))
timersub(&on->thread_send_dbdesc->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for DbDesc in Time %s [thread %s]\n",
on->dbdesc_list->count, duration,
- (thread_is_scheduled(on->thread_send_dbdesc) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_dbdesc) ? "on"
+ : "off"));
for (ALL_LSDB(on->dbdesc_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsreq))
+ if (event_is_scheduled(on->thread_send_lsreq))
timersub(&on->thread_send_lsreq->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for LSReq in Time %s [thread %s]\n",
on->request_list->count, duration,
- (thread_is_scheduled(on->thread_send_lsreq) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsreq) ? "on"
+ : "off"));
for (ALL_LSDB(on->request_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsupdate))
+ if (event_is_scheduled(on->thread_send_lsupdate))
timersub(&on->thread_send_lsupdate->u.sands, &now,
&res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for LSUpdate in Time %s [thread %s]\n",
on->lsupdate_list->count, duration,
- (thread_is_scheduled(on->thread_send_lsupdate)
- ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsupdate) ? "on"
+ : "off"));
for (ALL_LSDB(on->lsupdate_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
timerclear(&res);
- if (thread_is_scheduled(on->thread_send_lsack))
+ if (event_is_scheduled(on->thread_send_lsack))
timersub(&on->thread_send_lsack->u.sands, &now, &res);
timerstring(&res, duration, sizeof(duration));
vty_out(vty,
" %d Pending LSAs for LSAck in Time %s [thread %s]\n",
on->lsack_list->count, duration,
- (thread_is_scheduled(on->thread_send_lsack) ? "on"
- : "off"));
+ (event_is_scheduled(on->thread_send_lsack) ? "on"
+ : "off"));
for (ALL_LSDB(on->lsack_list, lsa, lsanext))
vty_out(vty, " %s\n", lsa->name);
* helper until this timer until
* this timer expires.
*/
- struct thread *t_grace_timer;
+ struct event *t_grace_timer;
/* Helper status */
uint32_t gr_helper_status;
struct ospf6_lsa *last_ls_req;
/* Inactivity timer */
- struct thread *inactivity_timer;
+ struct event *inactivity_timer;
/* Timer to release the last dbdesc packet */
- struct thread *last_dbdesc_release_timer;
+ struct event *last_dbdesc_release_timer;
/* Thread for sending message */
- struct thread *thread_send_dbdesc;
- struct thread *thread_send_lsreq;
- struct thread *thread_send_lsupdate;
- struct thread *thread_send_lsack;
- struct thread *thread_exchange_done;
- struct thread *thread_adj_ok;
+ struct event *thread_send_dbdesc;
+ struct event *thread_send_lsreq;
+ struct event *thread_send_lsupdate;
+ struct event *thread_send_lsack;
+ struct event *thread_exchange_done;
+ struct event *thread_adj_ok;
/* BFD information */
struct bfd_session_params *bfd_session;
void ospf6_neighbor_delete(struct ospf6_neighbor *on);
/* Neighbor event */
-extern void hello_received(struct thread *thread);
-extern void twoway_received(struct thread *thread);
-extern void negotiation_done(struct thread *thread);
-extern void exchange_done(struct thread *thread);
-extern void loading_done(struct thread *thread);
-extern void adj_ok(struct thread *thread);
-extern void seqnumber_mismatch(struct thread *thread);
-extern void bad_lsreq(struct thread *thread);
-extern void oneway_received(struct thread *thread);
-extern void inactivity_timer(struct thread *thread);
+extern void hello_received(struct event *thread);
+extern void twoway_received(struct event *thread);
+extern void negotiation_done(struct event *thread);
+extern void exchange_done(struct event *thread);
+extern void loading_done(struct event *thread);
+extern void adj_ok(struct event *thread);
+extern void seqnumber_mismatch(struct event *thread);
+extern void bad_lsreq(struct event *thread);
+extern void oneway_received(struct event *thread);
+extern void inactivity_timer(struct event *thread);
extern void ospf6_check_nbr_loading(struct ospf6_neighbor *on);
extern void ospf6_neighbor_init(void);
listnode_add(oi->area->ospf6->oi_write_q, (oi)); \
(oi)->on_write_q = 1; \
} \
- if (list_was_empty \
- && !list_isempty(oi->area->ospf6->oi_write_q)) \
- thread_add_write(master, ospf6_write, oi->area->ospf6, \
- oi->area->ospf6->fd, \
- &oi->area->ospf6->t_write); \
+ if (list_was_empty && \
+ !list_isempty(oi->area->ospf6->oi_write_q)) \
+ event_add_write(master, ospf6_write, oi->area->ospf6, \
+ oi->area->ospf6->fd, \
+ &oi->area->ospf6->t_write); \
} while (0)
#endif /* OSPF6_NETWORK_H */
#include "vty.h"
#include "linklist.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "plist.h"
#include "filter.h"
}
/* This function performs ABR related processing */
-static void ospf6_abr_task_timer(struct thread *thread)
+static void ospf6_abr_task_timer(struct event *thread)
{
- struct ospf6 *ospf6 = THREAD_ARG(thread);
+ struct ospf6 *ospf6 = EVENT_ARG(thread);
if (IS_OSPF6_DEBUG_ABR)
zlog_debug("Running ABR task on timer");
void ospf6_schedule_abr_task(struct ospf6 *ospf6)
{
- if (thread_is_scheduled(ospf6->t_abr_task)) {
+ if (event_is_scheduled(ospf6->t_abr_task)) {
if (IS_OSPF6_DEBUG_ABR)
zlog_debug("ABR task already scheduled");
return;
if (IS_OSPF6_DEBUG_ABR)
zlog_debug("Scheduling ABR task");
- thread_add_timer(master, ospf6_abr_task_timer, ospf6,
- OSPF6_ABR_TASK_DELAY, &ospf6->t_abr_task);
+ event_add_timer(master, ospf6_abr_task_timer, ospf6,
+ OSPF6_ABR_TASK_DELAY, &ospf6->t_abr_task);
}
/* Flush the NSSA LSAs from the area */
route->path.origin.id, o->router_id,
o->lsdb);
if (old) {
- THREAD_OFF(old->refresh);
- thread_add_event(master, ospf6_lsa_refresh, old, 0,
- &old->refresh);
+ EVENT_OFF(old->refresh);
+ event_add_event(master, ospf6_lsa_refresh, old, 0,
+ &old->refresh);
} else {
ospf6_as_external_lsa_originate(route, o);
}
lsa)) {
if (IS_OSPF6_DEBUG_NSSA)
ospf6_lsa_header_print(lsa);
- THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa,
- 0, &lsa->refresh);
+ EVENT_OFF(lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa,
+ 0, &lsa->refresh);
}
}
}
SET_FLAG(range->flag, OSPF6_ROUTE_REMOVE);
/* Redo summaries if required */
- thread_execute(master, ospf6_abr_task_timer, ospf6, 0);
+ event_execute(master, ospf6_abr_task_timer, ospf6, 0);
}
ospf6_route_remove(range, oa->nssa_range_table);
}
/* Register OSPFv3-MIB. */
-static int ospf6_snmp_init(struct thread_master *master)
+static int ospf6_snmp_init(struct event_loop *master)
{
smux_init(master);
REGISTER_MIB("OSPFv3MIB", ospfv3_variables, variable, ospfv3_oid);
#include "vty.h"
#include "prefix.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "lib_errors.h"
#include "ospf6_lsa.h"
zlog_debug("%s", buffer);
}
-static void ospf6_spf_calculation_thread(struct thread *t)
+static void ospf6_spf_calculation_thread(struct event *t)
{
struct ospf6_area *oa;
struct ospf6 *ospf6;
int areas_processed = 0;
char rbuf[32];
- ospf6 = (struct ospf6 *)THREAD_ARG(t);
+ ospf6 = (struct ospf6 *)EVENT_ARG(t);
/* execute SPF calculation */
monotime(&start);
}
/* SPF calculation timer is already scheduled. */
- if (thread_is_scheduled(ospf6->t_spf_calc)) {
+ if (event_is_scheduled(ospf6->t_spf_calc)) {
if (IS_OSPF6_DEBUG_SPF(PROCESS) || IS_OSPF6_DEBUG_SPF(TIME))
zlog_debug(
"SPF: calculation timer is already scheduled: %p",
if (IS_OSPF6_DEBUG_SPF(PROCESS) || IS_OSPF6_DEBUG_SPF(TIME))
zlog_debug("SPF: Rescheduling in %ld msec", delay);
- THREAD_OFF(ospf6->t_spf_calc);
- thread_add_timer_msec(master, ospf6_spf_calculation_thread, ospf6,
- delay, &ospf6->t_spf_calc);
+ EVENT_OFF(ospf6->t_spf_calc);
+ event_add_timer_msec(master, ospf6_spf_calculation_thread, ospf6, delay,
+ &ospf6->t_spf_calc);
}
void ospf6_spf_display_subtree(struct vty *vty, const char *prefix, int rest,
return 0;
}
-static void ospf6_ase_calculate_timer(struct thread *t)
+static void ospf6_ase_calculate_timer(struct event *t)
{
struct ospf6 *ospf6;
struct ospf6_lsa *lsa;
struct ospf6_area *area;
uint16_t type;
- ospf6 = THREAD_ARG(t);
+ ospf6 = EVENT_ARG(t);
/* Calculate external route for each AS-external-LSA */
type = htons(OSPF6_LSTYPE_AS_EXTERNAL);
if (ospf6 == NULL)
return;
- thread_add_timer(master, ospf6_ase_calculate_timer, ospf6,
- OSPF6_ASE_CALC_INTERVAL, &ospf6->t_ase_calc);
+ event_add_timer(master, ospf6_ase_calculate_timer, ospf6,
+ OSPF6_ASE_CALC_INTERVAL, &ospf6->t_ase_calc);
}
#include "linklist.h"
#include "prefix.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "defaults.h"
#include "lib/json.h"
* from VRF and make it "down".
*/
ospf6_vrf_unlink(ospf6, vrf);
- thread_cancel(&ospf6->t_ospf6_receive);
+ event_cancel(&ospf6->t_ospf6_receive);
close(ospf6->fd);
ospf6->fd = -1;
}
ret = ospf6_serv_sock(ospf6);
if (ret < 0 || ospf6->fd <= 0)
return 0;
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
ospf6_router_id_update(ospf6, true);
}
*/
ospf6_gr_nvm_read(ospf6);
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
return ospf6;
}
ospf6_route_remove_all(o->route_table);
ospf6_route_remove_all(o->brouter_table);
- THREAD_OFF(o->maxage_remover);
- THREAD_OFF(o->t_spf_calc);
- THREAD_OFF(o->t_ase_calc);
- THREAD_OFF(o->t_distribute_update);
- THREAD_OFF(o->t_ospf6_receive);
- THREAD_OFF(o->t_external_aggr);
- THREAD_OFF(o->gr_info.t_grace_period);
- THREAD_OFF(o->t_write);
- THREAD_OFF(o->t_abr_task);
+ EVENT_OFF(o->maxage_remover);
+ EVENT_OFF(o->t_spf_calc);
+ EVENT_OFF(o->t_ase_calc);
+ EVENT_OFF(o->t_distribute_update);
+ EVENT_OFF(o->t_ospf6_receive);
+ EVENT_OFF(o->t_external_aggr);
+ EVENT_OFF(o->gr_info.t_grace_period);
+ EVENT_OFF(o->t_write);
+ EVENT_OFF(o->t_abr_task);
}
}
-void ospf6_master_init(struct thread_master *master)
+void ospf6_master_init(struct event_loop *master)
{
memset(&ospf6_master, 0, sizeof(ospf6_master));
om6->master = master;
}
-static void ospf6_maxage_remover(struct thread *thread)
+static void ospf6_maxage_remover(struct event *thread)
{
- struct ospf6 *o = (struct ospf6 *)THREAD_ARG(thread);
+ struct ospf6 *o = (struct ospf6 *)EVENT_ARG(thread);
struct ospf6_area *oa;
struct ospf6_interface *oi;
struct ospf6_neighbor *on;
void ospf6_maxage_remove(struct ospf6 *o)
{
if (o)
- thread_add_timer(master, ospf6_maxage_remover, o,
- OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT,
- &o->maxage_remover);
+ event_add_timer(master, ospf6_maxage_remover, o,
+ OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT,
+ &o->maxage_remover);
}
bool ospf6_router_id_update(struct ospf6 *ospf6, bool init)
} else
json_object_boolean_false_add(json, "spfHasRun");
- if (thread_is_scheduled(o->t_spf_calc)) {
+ if (event_is_scheduled(o->t_spf_calc)) {
long time_store;
json_object_boolean_true_add(json, "spfTimerActive");
threadtimer_string(now, o->t_spf_calc, buf, sizeof(buf));
vty_out(vty, " SPF timer %s%s\n",
- (thread_is_scheduled(o->t_spf_calc) ? "due in "
- : "is "),
+ (event_is_scheduled(o->t_spf_calc) ? "due in " : "is "),
buf);
if (CHECK_FLAG(o->flag, OSPF6_STUB_ROUTER))
/* OSPFv3 instance. */
struct list *ospf6;
/* OSPFv3 thread master. */
- struct thread_master *master;
+ struct event_loop *master;
};
/* ospf6->config_flags */
bool prepare_in_progress;
bool finishing_restart;
uint32_t grace_period;
- struct thread *t_grace_period;
+ struct event *t_grace_period;
};
struct ospf6_gr_helper {
int fd;
/* Threads */
- struct thread *t_spf_calc; /* SPF calculation timer. */
- struct thread *t_ase_calc; /* ASE calculation timer. */
- struct thread *maxage_remover;
- struct thread *t_distribute_update; /* Distirbute update timer. */
- struct thread *t_ospf6_receive; /* OSPF6 receive timer */
- struct thread *t_external_aggr; /* OSPF6 aggregation timer */
+ struct event *t_spf_calc; /* SPF calculation timer. */
+ struct event *t_ase_calc; /* ASE calculation timer. */
+ struct event *maxage_remover;
+ struct event *t_distribute_update; /* Distribute update timer. */
+ struct event *t_ospf6_receive; /* OSPF6 receive timer */
+ struct event *t_external_aggr; /* OSPF6 aggregation timer */
#define OSPF6_WRITE_INTERFACE_COUNT_DEFAULT 20
- struct thread *t_write;
+ struct event *t_write;
int write_oi_count; /* Num of packets sent per thread invocation */
uint32_t ref_bandwidth;
/* Count of NSSA areas */
uint8_t anyNSSA;
- struct thread *t_abr_task; /* ABR task timer. */
+ struct event *t_abr_task; /* ABR task timer. */
struct list *oi_write_q;
uint32_t redist_count;
extern struct ospf6_master *om6;
/* prototypes */
-extern void ospf6_master_init(struct thread_master *master);
+extern void ospf6_master_init(struct event_loop *master);
extern void install_element_ospf6_clear_process(void);
extern void ospf6_top_init(void);
extern void ospf6_delete(struct ospf6 *o);
[ZEBRA_NEXTHOP_UPDATE] = ospf6_zebra_import_check_update,
};
-void ospf6_zebra_init(struct thread_master *master)
+void ospf6_zebra_init(struct event_loop *master)
{
/* Allocate zebra structure. */
zclient = zclient_new(master, &zclient_options_default, ospf6_handlers,
extern void ospf6_zebra_no_redistribute(int, vrf_id_t vrf_id);
#define ospf6_zebra_is_redistribute(type, vrf_id) \
vrf_bitmap_check(zclient->redist[AFI_IP6][type], vrf_id)
-extern void ospf6_zebra_init(struct thread_master *tm);
+extern void ospf6_zebra_init(struct event_loop *tm);
extern void ospf6_zebra_import_default_route(struct ospf6 *ospf6, bool unreg);
extern void ospf6_zebra_add_discard(struct ospf6_route *request,
struct ospf6 *ospf6);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "command.h"
}
/* Install ospf related commands. */
-void ospf6_init(struct thread_master *master)
+void ospf6_init(struct event_loop *master)
{
ospf6_top_init();
ospf6_area_init();
#define OSPF6D_H
#include "libospf.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
DECLARE_MGROUP(OSPF6D);
/* global variables */
-extern struct thread_master *master;
+extern struct event_loop *master;
/* Historical for KAME. */
#ifndef IPV6_JOIN_GROUP
extern struct route_node *route_prev(struct route_node *node);
extern void ospf6_debug(void);
-extern void ospf6_init(struct thread_master *master);
+extern void ospf6_init(struct event_loop *master);
#endif /* OSPF6D_H */
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
free to use any thread library (like pthreads). */
#include "ospfd/ospf_dump.h" /* for ospf_lsa_header_dump */
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
/* Local portnumber for async channel. Note that OSPF API library will also
#define ASYNCPORT 4000
/* Master thread */
-struct thread_master *master;
+struct event_loop *master;
/* Global variables */
struct ospf_apiclient *oclient;
* ---------------------------------------------------------
*/
-static void lsa_delete(struct thread *t)
+static void lsa_delete(struct event *t)
{
struct ospf_apiclient *oclient;
struct in_addr area_id;
int rc;
- oclient = THREAD_ARG(t);
+ oclient = EVENT_ARG(t);
rc = inet_aton(args[6], &area_id);
if (rc <= 0) {
printf("done, return code is = %d\n", rc);
}
-static void lsa_inject(struct thread *t)
+static void lsa_inject(struct event *t)
{
struct ospf_apiclient *cl;
struct in_addr ifaddr;
static uint32_t counter = 1; /* Incremented each time invoked */
int rc;
- cl = THREAD_ARG(t);
+ cl = EVENT_ARG(t);
rc = inet_aton(args[5], &ifaddr);
if (rc <= 0) {
/* This thread handles asynchronous messages coming in from the OSPF
API server */
-static void lsa_read(struct thread *thread)
+static void lsa_read(struct event *thread)
{
struct ospf_apiclient *oclient;
int fd;
printf("lsa_read called\n");
- oclient = THREAD_ARG(thread);
- fd = THREAD_FD(thread);
+ oclient = EVENT_ARG(thread);
+ fd = EVENT_FD(thread);
/* Handle asynchronous message */
ret = ospf_apiclient_handle_async(oclient);
}
/* Reschedule read thread */
- thread_add_read(master, lsa_read, oclient, fd, NULL);
+ event_add_read(master, lsa_read, oclient, fd, NULL);
}
/* ---------------------------------------------------------
lsa_type, opaque_type, &addr);
/* Schedule opaque LSA originate in 5 secs */
- thread_add_timer(master, lsa_inject, oclient, 5, NULL);
+ event_add_timer(master, lsa_inject, oclient, 5, NULL);
/* Schedule opaque LSA update with new value */
- thread_add_timer(master, lsa_inject, oclient, 10, NULL);
+ event_add_timer(master, lsa_inject, oclient, 10, NULL);
/* Schedule delete */
- thread_add_timer(master, lsa_delete, oclient, 30, NULL);
+ event_add_timer(master, lsa_delete, oclient, 30, NULL);
}
static void new_if_callback(struct in_addr ifaddr, struct in_addr area_id)
int main(int argc, char *argv[])
{
- struct thread thread;
+ struct event thread;
args = argv;
/* Initialization */
zprivs_preinit(&ospfd_privs);
zprivs_init(&ospfd_privs);
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
/* Open connection to OSPF daemon */
oclient = ospf_apiclient_connect(args[1], ASYNCPORT);
ospf_apiclient_sync_lsdb(oclient);
/* Schedule thread that handles asynchronous messages */
- thread_add_read(master, lsa_read, oclient, oclient->fd_async, NULL);
+ event_add_read(master, lsa_read, oclient, oclient->fd_async, NULL);
/* Now connection is established, run loop */
while (1) {
- thread_fetch(master, &thread);
- thread_call(&thread);
+ event_fetch(master, &thread);
+ event_call(&thread);
}
/* Never reached */
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
* @param thread
* @return 0.
*/
-static void ospf_abr_announce_non_dna_routers(struct thread *thread)
+static void ospf_abr_announce_non_dna_routers(struct event *thread)
{
struct ospf_area *area;
struct listnode *node;
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
- THREAD_OFF(ospf->t_abr_fr);
+ EVENT_OFF(ospf->t_abr_fr);
if (!IS_OSPF_ABR(ospf))
return;
* giving time for route synchronization in
* all the routers.
*/
- thread_add_timer(
- master, ospf_abr_announce_non_dna_routers, ospf,
- OSPF_ABR_DNA_TIMER, &ospf->t_abr_fr);
+ event_add_timer(master,
+ ospf_abr_announce_non_dna_routers, ospf,
+ OSPF_ABR_DNA_TIMER, &ospf->t_abr_fr);
}
}
zlog_debug("%s: Stop", __func__);
}
-static void ospf_abr_task_timer(struct thread *thread)
+static void ospf_abr_task_timer(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
ospf->t_abr_task = 0;
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("Scheduling ABR task");
- thread_add_timer(master, ospf_abr_task_timer, ospf, OSPF_ABR_TASK_DELAY,
- &ospf->t_abr_task);
+ event_add_timer(master, ospf_abr_task_timer, ospf, OSPF_ABR_TASK_DELAY,
+ &ospf->t_abr_task);
}
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "buffer.h"
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "buffer.h"
#include <sys/types.h>
-#include "ospfd/ospfd.h" /* for "struct thread_master" */
+#include "ospfd/ospfd.h" /* for "struct event_loop" */
#include "ospfd/ospf_interface.h"
#include "ospfd/ospf_ism.h"
#include "ospfd/ospf_asbr.h"
{
switch (event) {
case OSPF_APISERVER_ACCEPT:
- (void)thread_add_read(master, ospf_apiserver_accept, apiserv,
- fd, NULL);
+ (void)event_add_read(master, ospf_apiserver_accept, apiserv, fd,
+ NULL);
break;
case OSPF_APISERVER_SYNC_READ:
apiserv->t_sync_read = NULL;
- thread_add_read(master, ospf_apiserver_read, apiserv, fd,
- &apiserv->t_sync_read);
+ event_add_read(master, ospf_apiserver_read, apiserv, fd,
+ &apiserv->t_sync_read);
break;
#ifdef USE_ASYNC_READ
case OSPF_APISERVER_ASYNC_READ:
apiserv->t_async_read = NULL;
- thread_add_read(master, ospf_apiserver_read, apiserv, fd,
- &apiserv->t_async_read);
+ event_add_read(master, ospf_apiserver_read, apiserv, fd,
+ &apiserv->t_async_read);
break;
#endif /* USE_ASYNC_READ */
case OSPF_APISERVER_SYNC_WRITE:
- thread_add_write(master, ospf_apiserver_sync_write, apiserv, fd,
- &apiserv->t_sync_write);
+ event_add_write(master, ospf_apiserver_sync_write, apiserv, fd,
+ &apiserv->t_sync_write);
break;
case OSPF_APISERVER_ASYNC_WRITE:
- thread_add_write(master, ospf_apiserver_async_write, apiserv,
- fd, &apiserv->t_async_write);
+ event_add_write(master, ospf_apiserver_async_write, apiserv, fd,
+ &apiserv->t_async_write);
break;
}
}
struct listnode *node;
/* Cancel read and write threads. */
- THREAD_OFF(apiserv->t_sync_read);
+ EVENT_OFF(apiserv->t_sync_read);
#ifdef USE_ASYNC_READ
- THREAD_OFF(apiserv->t_async_read);
+ EVENT_OFF(apiserv->t_async_read);
#endif /* USE_ASYNC_READ */
- THREAD_OFF(apiserv->t_sync_write);
- THREAD_OFF(apiserv->t_async_write);
+ EVENT_OFF(apiserv->t_sync_write);
+ EVENT_OFF(apiserv->t_async_write);
/* Unregister all opaque types that application registered
and flush opaque LSAs if still in LSDB. */
XFREE(MTYPE_APISERVER, apiserv);
}
-void ospf_apiserver_read(struct thread *thread)
+void ospf_apiserver_read(struct event *thread)
{
struct ospf_apiserver *apiserv;
struct msg *msg;
int fd;
enum ospf_apiserver_event event;
- apiserv = THREAD_ARG(thread);
- fd = THREAD_FD(thread);
+ apiserv = EVENT_ARG(thread);
+ fd = EVENT_FD(thread);
if (fd == apiserv->fd_sync) {
event = OSPF_APISERVER_SYNC_READ;
msg_free(msg);
}
-void ospf_apiserver_sync_write(struct thread *thread)
+void ospf_apiserver_sync_write(struct event *thread)
{
struct ospf_apiserver *apiserv;
struct msg *msg;
int fd;
int rc = -1;
- apiserv = THREAD_ARG(thread);
+ apiserv = EVENT_ARG(thread);
assert(apiserv);
- fd = THREAD_FD(thread);
+ fd = EVENT_FD(thread);
apiserv->t_sync_write = NULL;
}
-void ospf_apiserver_async_write(struct thread *thread)
+void ospf_apiserver_async_write(struct event *thread)
{
struct ospf_apiserver *apiserv;
struct msg *msg;
int fd;
int rc = -1;
- apiserv = THREAD_ARG(thread);
+ apiserv = EVENT_ARG(thread);
assert(apiserv);
- fd = THREAD_FD(thread);
+ fd = EVENT_FD(thread);
apiserv->t_async_write = NULL;
/* Accept connection request from external applications. For each
accepted connection allocate own connection instance. */
-void ospf_apiserver_accept(struct thread *thread)
+void ospf_apiserver_accept(struct event *thread)
{
int accept_sock;
int new_sync_sock;
unsigned int peerlen;
int ret;
- /* THREAD_ARG (thread) is NULL */
- accept_sock = THREAD_FD(thread);
+ /* EVENT_ARG (thread) is NULL */
+ accept_sock = EVENT_FD(thread);
/* Keep hearing on socket for further connections. */
ospf_apiserver_event(OSPF_APISERVER_ACCEPT, accept_sock, NULL);
struct msg_fifo *out_async_fifo;
/* Read and write threads */
- struct thread *t_sync_read;
+ struct event *t_sync_read;
#ifdef USE_ASYNC_READ
- struct thread *t_async_read;
+ struct event *t_async_read;
#endif /* USE_ASYNC_READ */
- struct thread *t_sync_write;
- struct thread *t_async_write;
+ struct event *t_sync_write;
+ struct event *t_async_write;
};
enum ospf_apiserver_event {
extern void ospf_apiserver_event(enum ospf_apiserver_event event, int fd,
struct ospf_apiserver *apiserv);
extern int ospf_apiserver_serv_sock_family(unsigned short port, int family);
-extern void ospf_apiserver_accept(struct thread *thread);
-extern void ospf_apiserver_read(struct thread *thread);
-extern void ospf_apiserver_sync_write(struct thread *thread);
-extern void ospf_apiserver_async_write(struct thread *thread);
+extern void ospf_apiserver_accept(struct event *thread);
+extern void ospf_apiserver_read(struct event *thread);
+extern void ospf_apiserver_sync_write(struct event *thread);
+extern void ospf_apiserver_async_write(struct event *thread);
extern int ospf_apiserver_send_reply(struct ospf_apiserver *apiserv,
uint32_t seqnr, uint8_t rc);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
/* If there's redistribution configured, we need to refresh external
* LSAs in order to install Type-7 and flood to all NSSA Areas
*/
-static void ospf_asbr_nssa_redist_update_timer(struct thread *thread)
+static void ospf_asbr_nssa_redist_update_timer(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
int type;
ospf->t_asbr_nssa_redist_update = NULL;
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("Scheduling ASBR NSSA redistribution update");
- thread_add_timer(master, ospf_asbr_nssa_redist_update_timer, ospf,
- OSPF_ASBR_NSSA_REDIST_UPDATE_DELAY,
- &ospf->t_asbr_nssa_redist_update);
+ event_add_timer(master, ospf_asbr_nssa_redist_update_timer, ospf,
+ OSPF_ASBR_NSSA_REDIST_UPDATE_DELAY,
+ &ospf->t_asbr_nssa_redist_update);
}
void ospf_redistribute_withdraw(struct ospf *ospf, uint8_t type,
void ospf_external_aggregator_free(struct ospf_external_aggr_rt *aggr)
{
- if (OSPF_EXTERNAL_RT_COUNT(aggr))
- hash_clean(aggr->match_extnl_hash,
- (void *)ospf_aggr_unlink_external_info);
+ hash_clean_and_free(&aggr->match_extnl_hash,
+ (void *)ospf_aggr_unlink_external_info);
if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR))
zlog_debug("%s: Release the aggregator Address(%pI4/%d)",
__func__, &aggr->p.prefix, aggr->p.prefixlen);
- hash_free(aggr->match_extnl_hash);
- aggr->match_extnl_hash = NULL;
XFREE(MTYPE_OSPF_EXTERNAL_RT_AGGR, aggr);
}
aggr->action = OSPF_ROUTE_AGGR_NONE;
ospf_external_aggr_delete(ospf, rn);
- if (OSPF_EXTERNAL_RT_COUNT(aggr))
- hash_clean(
- aggr->match_extnl_hash,
- (void *)ospf_aggr_handle_external_info);
-
- hash_free(aggr->match_extnl_hash);
- XFREE(MTYPE_OSPF_EXTERNAL_RT_AGGR, aggr);
+ hash_clean_and_free(
+ &aggr->match_extnl_hash,
+ (void *)ospf_aggr_handle_external_info);
} else if (aggr->action == OSPF_ROUTE_AGGR_MODIFY) {
}
}
-static void ospf_asbr_external_aggr_process(struct thread *thread)
+static void ospf_asbr_external_aggr_process(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
int operation = 0;
ospf->t_external_aggr = NULL;
zlog_debug(
"%s, Restarting Aggregator delay timer.",
__func__);
- THREAD_OFF(ospf->t_external_aggr);
+ EVENT_OFF(ospf->t_external_aggr);
}
}
__func__, ospf->aggr_delay_interval);
ospf->aggr_action = operation;
- thread_add_timer(master, ospf_asbr_external_aggr_process, ospf,
- ospf->aggr_delay_interval, &ospf->t_external_aggr);
+ event_add_timer(master, ospf_asbr_external_aggr_process, ospf,
+ ospf->aggr_delay_interval, &ospf->t_external_aggr);
}
int ospf_asbr_external_aggregator_set(struct ospf *ospf, struct prefix_ipv4 *p,
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "hash.h"
#include "linklist.h"
return 0;
}
-static void ospf_ase_calculate_timer(struct thread *t)
+static void ospf_ase_calculate_timer(struct event *t)
{
struct ospf *ospf;
struct ospf_lsa *lsa;
struct ospf_area *area;
struct timeval start_time, stop_time;
- ospf = THREAD_ARG(t);
+ ospf = EVENT_ARG(t);
ospf->t_ase_calc = NULL;
if (ospf->ase_calc) {
if (ospf == NULL)
return;
- thread_add_timer(master, ospf_ase_calculate_timer, ospf,
- OSPF_ASE_CALC_INTERVAL, &ospf->t_ase_calc);
+ event_add_timer(master, ospf_ase_calculate_timer, ospf,
+ OSPF_ASE_CALC_INTERVAL, &ospf->t_ase_calc);
}
void ospf_ase_register_external_lsa(struct ospf_lsa *lsa, struct ospf *top)
#include "linklist.h"
#include "memory.h"
#include "prefix.h"
-#include "thread.h"
+#include "frrevent.h"
#include "buffer.h"
#include "stream.h"
#include "zclient.h"
return CMD_SUCCESS;
}
-void ospf_bfd_init(struct thread_master *tm)
+void ospf_bfd_init(struct event_loop *tm)
{
bfd_protocol_integration_init(zclient, tm);
#include "ospfd/ospf_interface.h"
#include "json.h"
-extern void ospf_bfd_init(struct thread_master *tm);
+extern void ospf_bfd_init(struct event_loop *tm);
extern void ospf_bfd_write_config(struct vty *vty,
const struct ospf_if_params *params);
#include "lib/bfd.h"
#include "monotime.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "command.h"
#include "stream.h"
return buf;
}
-const char *ospf_timer_dump(struct thread *t, char *buf, size_t size)
+const char *ospf_timer_dump(struct event *t, char *buf, size_t size)
{
struct timeval result;
if (!t)
extern char *ospf_lsa_type_str[];
/* Prototypes. */
-extern const char *ospf_area_name_string(struct ospf_area *);
-extern const char *ospf_area_desc_string(struct ospf_area *);
-extern const char *ospf_if_name_string(struct ospf_interface *);
+extern const char *ospf_area_name_string(struct ospf_area *area);
+extern const char *ospf_area_desc_string(struct ospf_area *area);
+extern const char *ospf_if_name_string(struct ospf_interface *oip);
extern int ospf_nbr_ism_state(struct ospf_neighbor *nbr);
extern void ospf_nbr_ism_state_message(struct ospf_neighbor *nbr, char *buf,
size_t size);
-extern const char *ospf_timer_dump(struct thread *, char *, size_t);
-extern const char *ospf_timeval_dump(struct timeval *, char *, size_t);
-extern void ospf_packet_dump(struct stream *);
+extern const char *ospf_timer_dump(struct event *e, char *buf, size_t size);
+extern const char *ospf_timeval_dump(struct timeval *t, char *buf, size_t size);
+extern void ospf_packet_dump(struct stream *s);
extern void ospf_debug_init(void);
/* Appropriate buffer size to use with ospf_timer_dump and ospf_timeval_dump: */
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "network.h"
#include "if.h"
#include "command.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "log.h"
#include "zclient.h"
zlog_debug("GR: exiting graceful restart: %s", reason);
ospf->gr_info.restart_in_progress = false;
- THREAD_OFF(ospf->gr_info.t_grace_period);
+ EVENT_OFF(ospf->gr_info.t_grace_period);
/* Record in non-volatile memory that the restart is complete. */
ospf_gr_nvm_delete(ospf);
}
/* Handling of grace period expiry. */
-static void ospf_gr_grace_period_expired(struct thread *thread)
+static void ospf_gr_grace_period_expired(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
ospf->gr_info.t_grace_period = NULL;
ospf_gr_restart_exit(ospf, "grace period has expired");
zlog_debug(
"GR: remaining time until grace period expires: %lu(s)",
remaining_time);
- thread_add_timer(master, ospf_gr_grace_period_expired,
- ospf, remaining_time,
- &ospf->gr_info.t_grace_period);
+ event_add_timer(master, ospf_gr_grace_period_expired,
+ ospf, remaining_time,
+ &ospf->gr_info.t_grace_period);
}
}
* helper until this timer until
* this timer expires.
*/
- struct thread *t_grace_timer;
+ struct event *t_grace_timer;
/* Helper status */
uint32_t gr_helper_status;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
if (ospf->enable_rtr_list == NULL)
return;
- hash_clean(ospf->enable_rtr_list, ospf_disable_rtr_hash_free);
- hash_free(ospf->enable_rtr_list);
- ospf->enable_rtr_list = NULL;
+ hash_clean_and_free(&ospf->enable_rtr_list, ospf_disable_rtr_hash_free);
}
/*
* Returns:
* Nothing
*/
-static void ospf_handle_grace_timer_expiry(struct thread *thread)
+static void ospf_handle_grace_timer_expiry(struct event *thread)
{
- struct ospf_neighbor *nbr = THREAD_ARG(thread);
+ struct ospf_neighbor *nbr = EVENT_ARG(thread);
nbr->gr_helper_info.t_grace_timer = NULL;
if (OSPF_GR_IS_ACTIVE_HELPER(restarter)) {
if (restarter->gr_helper_info.t_grace_timer)
- THREAD_OFF(restarter->gr_helper_info.t_grace_timer);
+ EVENT_OFF(restarter->gr_helper_info.t_grace_timer);
if (ospf->active_restarter_cnt > 0)
ospf->active_restarter_cnt--;
actual_grace_interval);
/* Start the grace timer */
- thread_add_timer(master, ospf_handle_grace_timer_expiry, restarter,
- actual_grace_interval,
- &restarter->gr_helper_info.t_grace_timer);
+ event_add_timer(master, ospf_handle_grace_timer_expiry, restarter,
+ actual_grace_interval,
+ &restarter->gr_helper_info.t_grace_timer);
return OSPF_GR_ACTIVE_HELPER;
}
* expiry, stop the grace timer.
*/
if (reason != OSPF_GR_HELPER_GRACE_TIMEOUT)
- THREAD_OFF(nbr->gr_helper_info.t_grace_timer);
+ EVENT_OFF(nbr->gr_helper_info.t_grace_timer);
/* check exit triggered due to successful completion
* of graceful restart.
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "hash.h"
#include "linklist.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
/* oi->nbrs and oi->nbr_nbma should be deleted on InterfaceDown event */
/* delete all static neighbors attached to this interface */
for (ALL_LIST_ELEMENTS(oi->nbr_nbma, node, nnode, nbr_nbma)) {
- THREAD_OFF(nbr_nbma->t_poll);
+ EVENT_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
listnode_delete(oi->ospf->oiflist, oi);
listnode_delete(oi->area->oiflist, oi);
- thread_cancel_event(master, oi);
+ event_cancel_event(master, oi);
memset(oi, 0, sizeof(*oi));
XFREE(MTYPE_OSPF_IF, oi);
if (oi->on_write_q) {
listnode_delete(ospf->oi_write_q, oi);
if (list_isempty(ospf->oi_write_q))
- THREAD_OFF(ospf->t_write);
+ EVENT_OFF(ospf->t_write);
oi->on_write_q = 0;
}
}
if_delete(&ifp);
if (!vrf_is_enabled(vrf))
vrf_delete(vrf);
- vlink_count--;
}
/* for a defined area, count the number of configured vl
ospf_hello_send(oi);
/* Restart hello timer for this interface */
- THREAD_OFF(oi->t_hello);
+ EVENT_OFF(oi->t_hello);
OSPF_HELLO_TIMER_ON(oi);
}
ospf_hello_send(oi);
/* Restart the hello timer. */
- THREAD_OFF(oi->t_hello);
+ EVENT_OFF(oi->t_hello);
OSPF_HELLO_TIMER_ON(oi);
}
}
uint32_t v_ls_ack; /* Delayed Link State Acknowledgment */
/* Threads. */
- struct thread *t_hello; /* timer */
- struct thread *t_wait; /* timer */
- struct thread *t_ls_ack; /* timer */
- struct thread *t_ls_ack_direct; /* event */
- struct thread *t_ls_upd_event; /* event */
- struct thread *t_opaque_lsa_self; /* Type-9 Opaque-LSAs */
+ struct event *t_hello; /* timer */
+ struct event *t_wait; /* timer */
+ struct event *t_ls_ack; /* timer */
+ struct event *t_ls_ack_direct; /* event */
+ struct event *t_ls_upd_event; /* event */
+ struct event *t_opaque_lsa_self; /* Type-9 Opaque-LSAs */
int on_write_q;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
}
-void ospf_hello_timer(struct thread *thread)
+void ospf_hello_timer(struct event *thread)
{
struct ospf_interface *oi;
- oi = THREAD_ARG(thread);
+ oi = EVENT_ARG(thread);
oi->t_hello = NULL;
if (IS_DEBUG_OSPF(ism, ISM_TIMERS))
OSPF_HELLO_TIMER_ON(oi);
}
-static void ospf_wait_timer(struct thread *thread)
+static void ospf_wait_timer(struct event *thread)
{
struct ospf_interface *oi;
- oi = THREAD_ARG(thread);
+ oi = EVENT_ARG(thread);
oi->t_wait = NULL;
if (IS_DEBUG_OSPF(ism, ISM_TIMERS))
interface parameters must be set to initial values, and
timers are
reset also. */
- THREAD_OFF(oi->t_hello);
- THREAD_OFF(oi->t_wait);
- THREAD_OFF(oi->t_ls_ack);
+ EVENT_OFF(oi->t_hello);
+ EVENT_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_ls_ack);
break;
case ISM_Loopback:
/* In this state, the interface may be looped back and will be
unavailable for regular data traffic. */
- THREAD_OFF(oi->t_hello);
- THREAD_OFF(oi->t_wait);
- THREAD_OFF(oi->t_ls_ack);
+ EVENT_OFF(oi->t_hello);
+ EVENT_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_ls_ack);
break;
case ISM_Waiting:
/* The router is trying to determine the identity of DRouter and
OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
OSPF_ISM_TIMER_ON(oi->t_wait, ospf_wait_timer,
OSPF_IF_PARAM(oi, v_wait));
- THREAD_OFF(oi->t_ls_ack);
+ EVENT_OFF(oi->t_ls_ack);
break;
case ISM_PointToPoint:
/* The interface connects to a physical Point-to-point network
neighboring router. Hello packets are also sent. */
/* send first hello immediately */
OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
- THREAD_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
and the router itself is neither Designated Router nor
Backup Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- THREAD_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
network,
and the router is Backup Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- THREAD_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
network,
and the router is Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- THREAD_OFF(oi->t_wait);
+ EVENT_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
}
/* Execute ISM event process. */
-void ospf_ism_event(struct thread *thread)
+void ospf_ism_event(struct event *thread)
{
int event;
int next_state;
struct ospf_interface *oi;
- oi = THREAD_ARG(thread);
- event = THREAD_VAL(thread);
+ oi = EVENT_ARG(thread);
+ event = EVENT_VAL(thread);
/* Call function. */
next_state = (*(ISM[oi->state][event].func))(oi);
oi->on_write_q = 1; \
} \
if (!list_isempty((O)->oi_write_q)) \
- thread_add_write(master, ospf_write, (O), (O)->fd, \
- &(O)->t_write); \
+ event_add_write(master, ospf_write, (O), (O)->fd, \
+ &(O)->t_write); \
} while (0)
/* Macro for OSPF ISM timer turn on. */
-#define OSPF_ISM_TIMER_ON(T, F, V) thread_add_timer(master, (F), oi, (V), &(T))
+#define OSPF_ISM_TIMER_ON(T, F, V) event_add_timer(master, (F), oi, (V), &(T))
#define OSPF_ISM_TIMER_MSEC_ON(T, F, V) \
- thread_add_timer_msec(master, (F), oi, (V), &(T))
+ event_add_timer_msec(master, (F), oi, (V), &(T))
/* convenience macro to set hello timer correctly, according to
* whether fast-hello is set or not
/* Macro for OSPF schedule event. */
#define OSPF_ISM_EVENT_SCHEDULE(I, E) \
- thread_add_event(master, ospf_ism_event, (I), (E), NULL)
+ event_add_event(master, ospf_ism_event, (I), (E), NULL)
/* Macro for OSPF execute event. */
#define OSPF_ISM_EVENT_EXECUTE(I, E) \
- thread_execute(master, ospf_ism_event, (I), (E))
+ event_execute(master, ospf_ism_event, (I), (E))
/* Prototypes. */
-extern void ospf_ism_event(struct thread *thread);
+extern void ospf_ism_event(struct event *thread);
extern void ism_change_status(struct ospf_interface *, int);
-extern void ospf_hello_timer(struct thread *thread);
+extern void ospf_hello_timer(struct event *thread);
extern int ospf_dr_election(struct ospf_interface *oi);
DECLARE_HOOK(ospf_ism_change,
#include "monotime.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "vty.h"
if (ldp_sync_info && ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED) {
if (ldp_sync_info->state == LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP)
ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_UP;
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ospf_if_recalculate_output_cost(ifp);
}
}
if (ldp_sync_info &&
ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED &&
ldp_sync_info->state != LDP_IGP_SYNC_STATE_NOT_REQUIRED) {
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP;
ospf_if_recalculate_output_cost(ifp);
}
*/
ols_debug("%s: Removed from if %s", __func__, ifp->name);
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ldp_sync_info->state = LDP_IGP_SYNC_STATE_NOT_REQUIRED;
ospf_if_recalculate_output_cost(ifp);
/*
* LDP-SYNC holddown timer routines
*/
-static void ospf_ldp_sync_holddown_timer(struct thread *thread)
+static void ospf_ldp_sync_holddown_timer(struct event *thread)
{
struct interface *ifp;
struct ospf_if_params *params;
* didn't receive msg from LDP indicating sync-complete
* restore interface cost to original value
*/
- ifp = THREAD_ARG(thread);
+ ifp = EVENT_ARG(thread);
params = IF_DEF_PARAMS(ifp);
if (params->ldp_sync_info) {
ldp_sync_info = params->ldp_sync_info;
ols_debug("%s: start holddown timer for %s time %d", __func__,
ifp->name, ldp_sync_info->holddown);
- thread_add_timer(master, ospf_ldp_sync_holddown_timer,
- ifp, ldp_sync_info->holddown,
- &ldp_sync_info->t_holddown);
+ event_add_timer(master, ospf_ldp_sync_holddown_timer, ifp,
+ ldp_sync_info->holddown, &ldp_sync_info->t_holddown);
}
/*
SET_FLAG(ldp_sync_info->flags, LDP_SYNC_FLAG_IF_CONFIG);
ldp_sync_info->enabled = LDP_IGP_SYNC_DEFAULT;
ldp_sync_info->state = LDP_IGP_SYNC_STATE_NOT_REQUIRED;
- THREAD_OFF(ldp_sync_info->t_holddown);
+ EVENT_OFF(ldp_sync_info->t_holddown);
ospf_if_recalculate_output_cost(ifp);
return CMD_SUCCESS;
#include "memory.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "checksum.h"
stream_putw_at(*s, putp, cnt);
}
-static void ospf_stub_router_timer(struct thread *t)
+static void ospf_stub_router_timer(struct event *t)
{
- struct ospf_area *area = THREAD_ARG(t);
+ struct ospf_area *area = EVENT_ARG(t);
area->t_stub_router = NULL;
struct in_addr old_id)
{
struct ospf_lsa *lsa = NULL;
- struct ospf_lsa *new = NULL;
+ struct ospf_lsa *summary_lsa = NULL;
struct summary_lsa *sl = NULL;
struct ospf_area *old_area = NULL;
struct ospf *ospf = area->ospf;
if (type == OSPF_SUMMARY_LSA) {
/*Refresh the LSA with new LSA*/
- ospf_summary_lsa_refresh(ospf, lsa);
+ summary_lsa = ospf_summary_lsa_refresh(ospf, lsa);
- new = ospf_summary_lsa_prepare_and_flood(
- &old_prefix, old_metric, old_area, old_id);
+ ospf_summary_lsa_prepare_and_flood(&old_prefix, old_metric,
+ old_area, old_id);
} else {
/*Refresh the LSA with new LSA*/
- ospf_summary_asbr_lsa_refresh(ospf, lsa);
+ summary_lsa = ospf_summary_asbr_lsa_refresh(ospf, lsa);
- new = ospf_asbr_summary_lsa_prepare_and_flood(
- &old_prefix, old_metric, old_area, old_id);
+ ospf_asbr_summary_lsa_prepare_and_flood(&old_prefix, old_metric,
+ old_area, old_id);
}
- return new;
+ return summary_lsa;
}
/* Originate Summary-LSA. */
}
-void ospf_maxage_lsa_remover(struct thread *thread)
+void ospf_maxage_lsa_remover(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
struct ospf_lsa *lsa, *old;
struct route_node *rn;
int reschedule = 0;
}
/* TODO: maybe convert this function to a work-queue */
- if (thread_should_yield(thread)) {
+ if (event_should_yield(thread)) {
OSPF_TIMER_ON(ospf->t_maxage,
ospf_maxage_lsa_remover, 0);
route_unlock_node(
}
/* Periodical check of MaxAge LSA. */
-void ospf_lsa_maxage_walker(struct thread *thread)
+void ospf_lsa_maxage_walker(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
struct route_node *rn;
struct ospf_lsa *lsa;
struct ospf_area *area;
* without conflicting to other threads.
*/
if (ospf->t_maxage != NULL) {
- THREAD_OFF(ospf->t_maxage);
- thread_execute(master, ospf_maxage_lsa_remover, ospf, 0);
+ EVENT_OFF(ospf->t_maxage);
+ event_execute(master, ospf_maxage_lsa_remover, ospf, 0);
}
return;
struct ospf_lsa *lsa;
};
-static void ospf_lsa_action(struct thread *t)
+static void ospf_lsa_action(struct event *t)
{
struct lsa_action *data;
- data = THREAD_ARG(t);
+ data = EVENT_ARG(t);
if (IS_DEBUG_OSPF(lsa, LSA) == OSPF_DEBUG_LSA)
zlog_debug("LSA[Action]: Performing scheduled LSA action: %d",
data->area = area;
data->lsa = ospf_lsa_lock(lsa); /* Message / Flood area */
- thread_add_event(master, ospf_lsa_action, data, 0, NULL);
+ event_add_event(master, ospf_lsa_action, data, 0, NULL);
}
void ospf_schedule_lsa_flush_area(struct ospf_area *area, struct ospf_lsa *lsa)
data->area = area;
data->lsa = ospf_lsa_lock(lsa); /* Message / Flush area */
- thread_add_event(master, ospf_lsa_action, data, 0, NULL);
+ event_add_event(master, ospf_lsa_action, data, 0, NULL);
}
}
}
-void ospf_lsa_refresh_walker(struct thread *t)
+void ospf_lsa_refresh_walker(struct event *t)
{
struct list *refresh_list;
struct listnode *node, *nnode;
- struct ospf *ospf = THREAD_ARG(t);
+ struct ospf *ospf = EVENT_ARG(t);
struct ospf_lsa *lsa;
int i;
struct list *lsa_to_refresh = list_new();
}
ospf->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- ospf->lsa_refresh_interval, &ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf,
+ ospf->lsa_refresh_interval, &ospf->t_lsa_refresher);
ospf->lsa_refresher_started = monotime(NULL);
for (ALL_LIST_ELEMENTS(lsa_to_refresh, node, nnode, lsa)) {
dna_lsa = ospf_check_dna_lsa(lsa);
if (!dna_lsa) { /* refresh only non-DNA LSAs */
ospf_lsa_refresh(ospf, lsa);
- assert(lsa->lock > 0);
- ospf_lsa_unlock(&lsa); /* lsa_refresh_queue & temp for
- * lsa_to_refresh.
- */
}
+ assert(lsa->lock > 0);
+ ospf_lsa_unlock(&lsa); /* lsa_refresh_queue & temp for
+ * lsa_to_refresh.
+ */
}
list_delete(&lsa_to_refresh);
extern void ospf_lsa_maxage(struct ospf *, struct ospf_lsa *);
extern uint32_t get_metric(uint8_t *);
-extern void ospf_lsa_maxage_walker(struct thread *thread);
+extern void ospf_lsa_maxage_walker(struct event *thread);
extern struct ospf_lsa *ospf_lsa_refresh(struct ospf *, struct ospf_lsa *);
extern void ospf_external_lsa_refresh_default(struct ospf *);
extern void ospf_refresher_register_lsa(struct ospf *, struct ospf_lsa *);
extern void ospf_refresher_unregister_lsa(struct ospf *, struct ospf_lsa *);
-extern void ospf_lsa_refresh_walker(struct thread *thread);
+extern void ospf_lsa_refresh_walker(struct event *thread);
extern void ospf_lsa_maxage_delete(struct ospf *, struct ospf_lsa *);
struct ospf_lsa *lsa);
extern void ospf_flush_lsa_from_area(struct ospf *ospf, struct in_addr area_id,
int type);
-extern void ospf_maxage_lsa_remover(struct thread *thread);
+extern void ospf_maxage_lsa_remover(struct event *thread);
extern bool ospf_check_dna_lsa(const struct ospf_lsa *lsa);
extern void ospf_refresh_area_self_lsas(struct ospf_area *area);
#include <lib/version.h>
#include "bfd.h"
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
/* OSPFd program name */
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
#ifdef SUPPORT_OSPF_API
extern int ospf_apiserver_enable;
#include "prefix.h"
#include "memory.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "stream.h"
#include "table.h"
#include "log.h"
}
/* Cancel all timers. */
- THREAD_OFF(nbr->t_inactivity);
- THREAD_OFF(nbr->t_db_desc);
- THREAD_OFF(nbr->t_ls_req);
- THREAD_OFF(nbr->t_ls_upd);
+ EVENT_OFF(nbr->t_inactivity);
+ EVENT_OFF(nbr->t_db_desc);
+ EVENT_OFF(nbr->t_ls_req);
+ EVENT_OFF(nbr->t_ls_upd);
/* Cancel all events. */ /* Thread lookup cost would be negligible. */
- thread_cancel_event(master, nbr);
+ event_cancel_event(master, nbr);
bfd_sess_free(&nbr->bfd_session);
- THREAD_OFF(nbr->gr_helper_info.t_grace_timer);
+ EVENT_OFF(nbr->gr_helper_info.t_grace_timer);
nbr->oi = NULL;
XFREE(MTYPE_OSPF_NEIGHBOR, nbr);
nbr->nbr_nbma = nbr_nbma;
if (nbr_nbma->t_poll)
- THREAD_OFF(nbr_nbma->t_poll);
+ EVENT_OFF(nbr_nbma->t_poll);
nbr->state_change = nbr_nbma->state_change + 1;
}
uint32_t v_ls_upd;
/* Threads. */
- struct thread *t_inactivity;
- struct thread *t_db_desc;
- struct thread *t_ls_req;
- struct thread *t_ls_upd;
- struct thread *t_hello_reply;
+ struct event *t_inactivity;
+ struct event *t_db_desc;
+ struct event *t_ls_req;
+ struct event *t_ls_upd;
+ struct event *t_hello_reply;
/* NBMA configured neighbour */
struct ospf_nbr_nbma *nbr_nbma;
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "if.h"
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "hash.h"
#include "linklist.h"
static void nsm_clear_adj(struct ospf_neighbor *);
/* OSPF NSM Timer functions. */
-static void ospf_inactivity_timer(struct thread *thread)
+static void ospf_inactivity_timer(struct event *thread)
{
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
nbr->t_inactivity = NULL;
if (IS_DEBUG_OSPF(nsm, NSM_TIMERS))
}
}
-static void ospf_db_desc_timer(struct thread *thread)
+static void ospf_db_desc_timer(struct event *thread)
{
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
nbr->t_db_desc = NULL;
if (IS_DEBUG_OSPF(nsm, NSM_TIMERS))
switch (nbr->state) {
case NSM_Deleted:
case NSM_Down:
- THREAD_OFF(nbr->t_inactivity);
- THREAD_OFF(nbr->t_hello_reply);
+ EVENT_OFF(nbr->t_inactivity);
+ EVENT_OFF(nbr->t_hello_reply);
/* fallthru */
case NSM_Attempt:
case NSM_Init:
case NSM_TwoWay:
- THREAD_OFF(nbr->t_db_desc);
- THREAD_OFF(nbr->t_ls_upd);
- THREAD_OFF(nbr->t_ls_req);
+ EVENT_OFF(nbr->t_db_desc);
+ EVENT_OFF(nbr->t_ls_upd);
+ EVENT_OFF(nbr->t_ls_req);
break;
case NSM_ExStart:
OSPF_NSM_TIMER_ON(nbr->t_db_desc, ospf_db_desc_timer,
nbr->v_db_desc);
- THREAD_OFF(nbr->t_ls_upd);
- THREAD_OFF(nbr->t_ls_req);
+ EVENT_OFF(nbr->t_ls_upd);
+ EVENT_OFF(nbr->t_ls_req);
break;
case NSM_Exchange:
OSPF_NSM_TIMER_ON(nbr->t_ls_upd, ospf_ls_upd_timer,
nbr->v_ls_upd);
if (!IS_SET_DD_MS(nbr->dd_flags))
- THREAD_OFF(nbr->t_db_desc);
+ EVENT_OFF(nbr->t_db_desc);
break;
case NSM_Loading:
case NSM_Full:
default:
- THREAD_OFF(nbr->t_db_desc);
+ EVENT_OFF(nbr->t_db_desc);
break;
}
}
static int nsm_hello_received(struct ospf_neighbor *nbr)
{
/* Start or Restart Inactivity Timer. */
- THREAD_OFF(nbr->t_inactivity);
+ EVENT_OFF(nbr->t_inactivity);
OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
nbr->v_inactivity);
if (nbr->oi->type == OSPF_IFTYPE_NBMA && nbr->nbr_nbma)
- THREAD_OFF(nbr->nbr_nbma->t_poll);
+ EVENT_OFF(nbr->nbr_nbma->t_poll);
/* Send proactive ARP requests */
if (nbr->state < NSM_Exchange)
static int nsm_start(struct ospf_neighbor *nbr)
{
if (nbr->nbr_nbma)
- THREAD_OFF(nbr->nbr_nbma->t_poll);
+ EVENT_OFF(nbr->nbr_nbma->t_poll);
- THREAD_OFF(nbr->t_inactivity);
+ EVENT_OFF(nbr->t_inactivity);
OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
nbr->v_inactivity);
}
/* Execute NSM event process. */
-void ospf_nsm_event(struct thread *thread)
+void ospf_nsm_event(struct event *thread)
{
int event;
int next_state;
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
- event = THREAD_VAL(thread);
+ nbr = EVENT_ARG(thread);
+ event = EVENT_VAL(thread);
if (IS_DEBUG_OSPF(nsm, NSM_EVENTS))
zlog_debug("NSM[%s:%pI4:%s]: %s (%s)", IF_NAME(nbr->oi),
#define OSPF_NSM_EVENT_MAX 14
/* Macro for OSPF NSM timer turn on. */
-#define OSPF_NSM_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr, (V), &(T))
+#define OSPF_NSM_TIMER_ON(T, F, V) event_add_timer(master, (F), nbr, (V), &(T))
/* Macro for OSPF NSM schedule event. */
#define OSPF_NSM_EVENT_SCHEDULE(N, E) \
- thread_add_event(master, ospf_nsm_event, (N), (E), NULL)
+ event_add_event(master, ospf_nsm_event, (N), (E), NULL)
/* Macro for OSPF NSM execute event. */
#define OSPF_NSM_EVENT_EXECUTE(N, E) \
- thread_execute(master, ospf_nsm_event, (N), (E))
+ event_execute(master, ospf_nsm_event, (N), (E))
/* Prototypes. */
-extern void ospf_nsm_event(struct thread *);
-extern void ospf_check_nbr_loading(struct ospf_neighbor *);
-extern int ospf_db_summary_isempty(struct ospf_neighbor *);
-extern int ospf_db_summary_count(struct ospf_neighbor *);
-extern void ospf_db_summary_clear(struct ospf_neighbor *);
+extern void ospf_nsm_event(struct event *e);
+extern void ospf_check_nbr_loading(struct ospf_neighbor *nbr);
+extern int ospf_db_summary_isempty(struct ospf_neighbor *nbr);
+extern int ospf_db_summary_count(struct ospf_neighbor *nbr);
+extern void ospf_db_summary_clear(struct ospf_neighbor *nbr);
extern int nsm_should_adj(struct ospf_neighbor *nbr);
DECLARE_HOOK(ospf_nsm_change,
(struct ospf_neighbor * on, int state, int oldstate),
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "printfrr.h"
void ospf_opaque_type9_lsa_term(struct ospf_interface *oi)
{
- THREAD_OFF(oi->t_opaque_lsa_self);
+ EVENT_OFF(oi->t_opaque_lsa_self);
if (oi->opaque_lsa_self != NULL)
list_delete(&oi->opaque_lsa_self);
oi->opaque_lsa_self = NULL;
area->lsdb->new_lsa_hook = area->lsdb->del_lsa_hook = NULL;
#endif /* MONITOR_LSDB_CHANGE */
- THREAD_OFF(area->t_opaque_lsa_self);
+ EVENT_OFF(area->t_opaque_lsa_self);
if (area->opaque_lsa_self != NULL)
list_delete(&area->opaque_lsa_self);
return;
top->lsdb->new_lsa_hook = top->lsdb->del_lsa_hook = NULL;
#endif /* MONITOR_LSDB_CHANGE */
- THREAD_OFF(top->t_opaque_lsa_self);
+ EVENT_OFF(top->t_opaque_lsa_self);
if (top->opaque_lsa_self != NULL)
list_delete(&top->opaque_lsa_self);
return;
* to (re-)originate their own Opaque-LSAs out-of-sync with others.
* This thread is prepared for that specific purpose.
*/
- struct thread *t_opaque_lsa_self;
+ struct event *t_opaque_lsa_self;
/*
* Backpointer to an "owner" which is LSA-type dependent.
uint32_t opaque_id;
/* Thread for refresh/flush scheduling for this opaque-type/id. */
- struct thread *t_opaque_lsa_self;
+ struct event *t_opaque_lsa_self;
/* Backpointer to Opaque-LSA control information per opaque-type. */
struct opaque_info_per_type *opqctl_type;
ospf_opaque_lsa_flush_schedule(lsa);
}
- THREAD_OFF(oipt->t_opaque_lsa_self);
+ EVENT_OFF(oipt->t_opaque_lsa_self);
list_delete(&oipt->id_list);
if (cleanup_owner) {
/* Remove from its owner's self-originated LSA list. */
{
struct opaque_info_per_id *oipi = (struct opaque_info_per_id *)val;
- THREAD_OFF(oipi->t_opaque_lsa_self);
+ EVENT_OFF(oipi->t_opaque_lsa_self);
if (oipi->lsa != NULL)
ospf_lsa_unlock(&oipi->lsa);
XFREE(MTYPE_OPAQUE_INFO_PER_ID, oipi);
* Following are Opaque-LSA origination/refresh management functions.
*------------------------------------------------------------------------*/
-static void ospf_opaque_type9_lsa_originate(struct thread *t);
-static void ospf_opaque_type10_lsa_originate(struct thread *t);
-static void ospf_opaque_type11_lsa_originate(struct thread *t);
+static void ospf_opaque_type9_lsa_originate(struct event *t);
+static void ospf_opaque_type10_lsa_originate(struct event *t);
+static void ospf_opaque_type11_lsa_originate(struct event *t);
static void ospf_opaque_lsa_reoriginate_resume(struct list *listtop, void *arg);
void ospf_opaque_lsa_originate_schedule(struct ospf_interface *oi, int *delay0)
"Schedule Type-9 Opaque-LSA origination in %d ms later.",
delay);
oi->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type9_lsa_originate,
- oi, delay, &oi->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type9_lsa_originate,
+ oi, delay, &oi->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
"Schedule Type-10 Opaque-LSA origination in %d ms later.",
delay);
area->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type10_lsa_originate,
- area, delay, &area->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type10_lsa_originate,
+ area, delay, &area->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
"Schedule Type-11 Opaque-LSA origination in %d ms later.",
delay);
top->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type11_lsa_originate,
- top, delay, &top->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type11_lsa_originate,
+ top, delay, &top->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
*delay0 = delay;
}
-static void ospf_opaque_type9_lsa_originate(struct thread *t)
+static void ospf_opaque_type9_lsa_originate(struct event *t)
{
struct ospf_interface *oi;
- oi = THREAD_ARG(t);
+ oi = EVENT_ARG(t);
oi->t_opaque_lsa_self = NULL;
if (IS_DEBUG_OSPF_EVENT)
opaque_lsa_originate_callback(ospf_opaque_type9_funclist, oi);
}
-static void ospf_opaque_type10_lsa_originate(struct thread *t)
+static void ospf_opaque_type10_lsa_originate(struct event *t)
{
struct ospf_area *area;
- area = THREAD_ARG(t);
+ area = EVENT_ARG(t);
area->t_opaque_lsa_self = NULL;
if (IS_DEBUG_OSPF_EVENT)
opaque_lsa_originate_callback(ospf_opaque_type10_funclist, area);
}
-static void ospf_opaque_type11_lsa_originate(struct thread *t)
+static void ospf_opaque_type11_lsa_originate(struct event *t)
{
struct ospf *top;
- top = THREAD_ARG(t);
+ top = EVENT_ARG(t);
top->t_opaque_lsa_self = NULL;
if (IS_DEBUG_OSPF_EVENT)
* triggered by external interventions (vty session, signaling, etc).
*------------------------------------------------------------------------*/
-#define OSPF_OPAQUE_TIMER_ON(T,F,L,V) thread_add_timer_msec (master, (F), (L), (V), &(T))
+#define OSPF_OPAQUE_TIMER_ON(T, F, L, V) \
+ event_add_timer_msec(master, (F), (L), (V), &(T))
static struct ospf_lsa *pseudo_lsa(struct ospf_interface *oi,
struct ospf_area *area, uint8_t lsa_type,
uint8_t opaque_type);
-static void ospf_opaque_type9_lsa_reoriginate_timer(struct thread *t);
-static void ospf_opaque_type10_lsa_reoriginate_timer(struct thread *t);
-static void ospf_opaque_type11_lsa_reoriginate_timer(struct thread *t);
-static void ospf_opaque_lsa_refresh_timer(struct thread *t);
+static void ospf_opaque_type9_lsa_reoriginate_timer(struct event *t);
+static void ospf_opaque_type10_lsa_reoriginate_timer(struct event *t);
+static void ospf_opaque_type11_lsa_reoriginate_timer(struct event *t);
+static void ospf_opaque_lsa_refresh_timer(struct event *t);
void ospf_opaque_lsa_reoriginate_schedule(void *lsa_type_dependent,
uint8_t lsa_type, uint8_t opaque_type)
struct ospf_lsa *lsa;
struct opaque_info_per_type *oipt;
- void (*func)(struct thread * t) = NULL;
+ void (*func)(struct event * t) = NULL;
int delay;
switch (lsa_type) {
return &lsa;
}
-static void ospf_opaque_type9_lsa_reoriginate_timer(struct thread *t)
+static void ospf_opaque_type9_lsa_reoriginate_timer(struct event *t)
{
struct opaque_info_per_type *oipt;
struct ospf_opaque_functab *functab;
struct ospf *top;
struct ospf_interface *oi;
- oipt = THREAD_ARG(t);
+ oipt = EVENT_ARG(t);
if ((functab = oipt->functab) == NULL
|| functab->lsa_originator == NULL) {
(*functab->lsa_originator)(oi);
}
-static void ospf_opaque_type10_lsa_reoriginate_timer(struct thread *t)
+static void ospf_opaque_type10_lsa_reoriginate_timer(struct event *t)
{
struct opaque_info_per_type *oipt;
struct ospf_opaque_functab *functab;
struct ospf_interface *oi;
int n;
- oipt = THREAD_ARG(t);
+ oipt = EVENT_ARG(t);
if ((functab = oipt->functab) == NULL
|| functab->lsa_originator == NULL) {
(*functab->lsa_originator)(area);
}
-static void ospf_opaque_type11_lsa_reoriginate_timer(struct thread *t)
+static void ospf_opaque_type11_lsa_reoriginate_timer(struct event *t)
{
struct opaque_info_per_type *oipt;
struct ospf_opaque_functab *functab;
struct ospf *top;
- oipt = THREAD_ARG(t);
+ oipt = EVENT_ARG(t);
if ((functab = oipt->functab) == NULL
|| functab->lsa_originator == NULL) {
return;
}
-static void ospf_opaque_lsa_refresh_timer(struct thread *t)
+static void ospf_opaque_lsa_refresh_timer(struct event *t)
{
struct opaque_info_per_id *oipi;
struct ospf_opaque_functab *functab;
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("Timer[Opaque-LSA]: (Opaque-LSA Refresh expire)");
- oipi = THREAD_ARG(t);
+ oipi = EVENT_ARG(t);
if ((lsa = oipi->lsa) != NULL)
if ((functab = oipi->opqctl_type->functab) != NULL)
#include <zebra.h>
#include "monotime.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "linklist.h"
#include "prefix.h"
}
-static void ospf_ls_req_timer(struct thread *thread)
+static void ospf_ls_req_timer(struct event *thread)
{
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
nbr->t_ls_req = NULL;
/* Send Link State Request. */
void ospf_ls_req_event(struct ospf_neighbor *nbr)
{
- THREAD_OFF(nbr->t_ls_req);
- thread_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
+ EVENT_OFF(nbr->t_ls_req);
+ event_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
}
/* Cyclic timer function. Fist registered in ospf_nbr_new () in
ospf_neighbor.c */
-void ospf_ls_upd_timer(struct thread *thread)
+void ospf_ls_upd_timer(struct event *thread)
{
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
nbr->t_ls_upd = NULL;
/* Send Link State Update. */
OSPF_NSM_TIMER_ON(nbr->t_ls_upd, ospf_ls_upd_timer, nbr->v_ls_upd);
}
-void ospf_ls_ack_timer(struct thread *thread)
+void ospf_ls_ack_timer(struct event *thread)
{
struct ospf_interface *oi;
- oi = THREAD_ARG(thread);
+ oi = EVENT_ARG(thread);
oi->t_ls_ack = NULL;
/* Send Link State Acknowledgment. */
}
#endif /* WANT_OSPF_WRITE_FRAGMENT */
-static void ospf_write(struct thread *thread)
+static void ospf_write(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
struct ospf_interface *oi;
struct ospf_packet *op;
struct sockaddr_in sa_dst;
/* If packets still remain in queue, call write thread. */
if (!list_isempty(ospf->oi_write_q))
- thread_add_write(master, ospf_write, ospf, ospf->fd,
- &ospf->t_write);
+ event_add_write(master, ospf_write, ospf, ospf->fd,
+ &ospf->t_write);
}
/* OSPF Hello message read -- RFC2328 Section 10.5. */
}
/* Starting point of packet process function. */
-void ospf_read(struct thread *thread)
+void ospf_read(struct event *thread)
{
struct ospf *ospf;
int32_t count = 0;
enum ospf_read_return_enum ret;
/* first of all get interface pointer. */
- ospf = THREAD_ARG(thread);
+ ospf = EVENT_ARG(thread);
/* prepare for next packet. */
- thread_add_read(master, ospf_read, ospf, ospf->fd, &ospf->t_read);
+ event_add_read(master, ospf_read, ospf, ospf->fd, &ospf->t_read);
while (count < ospf->write_oi_count) {
count++;
ospf_hello_send_sub(oi, nbr_nbma->addr.s_addr);
}
-void ospf_poll_timer(struct thread *thread)
+void ospf_poll_timer(struct event *thread)
{
struct ospf_nbr_nbma *nbr_nbma;
- nbr_nbma = THREAD_ARG(thread);
+ nbr_nbma = EVENT_ARG(thread);
nbr_nbma->t_poll = NULL;
if (IS_DEBUG_OSPF(nsm, NSM_TIMERS))
}
-void ospf_hello_reply_timer(struct thread *thread)
+void ospf_hello_reply_timer(struct event *thread)
{
struct ospf_neighbor *nbr;
- nbr = THREAD_ARG(thread);
+ nbr = EVENT_ARG(thread);
nbr->t_hello_reply = NULL;
if (IS_DEBUG_OSPF(nsm, NSM_TIMERS))
ospf_packet_add(oi, op);
/* Call ospf_write() right away to send ospf packets to neighbors */
if (send_lsupd_now) {
- struct thread os_packet_thd;
+ struct event os_packet_thd;
os_packet_thd.arg = (void *)oi->ospf;
if (oi->on_write_q == 0) {
* is actually turned off.
*/
if (list_isempty(oi->ospf->oi_write_q))
- THREAD_OFF(oi->ospf->t_write);
+ EVENT_OFF(oi->ospf->t_write);
} else {
/* Hook thread to write packet. */
OSPF_ISM_WRITE_ON(oi->ospf);
}
}
-static void ospf_ls_upd_send_queue_event(struct thread *thread)
+static void ospf_ls_upd_send_queue_event(struct event *thread)
{
- struct ospf_interface *oi = THREAD_ARG(thread);
+ struct ospf_interface *oi = EVENT_ARG(thread);
struct route_node *rn;
struct route_node *rnext;
struct list *update;
"%s: update lists not cleared, %d nodes to try again, raising new event",
__func__, again);
oi->t_ls_upd_event = NULL;
- thread_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
- &oi->t_ls_upd_event);
+ event_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
+ &oi->t_ls_upd_event);
}
if (IS_DEBUG_OSPF_EVENT)
rn->p.u.prefix4, 1);
}
} else
- thread_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
- &oi->t_ls_upd_event);
+ event_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
+ &oi->t_ls_upd_event);
}
static void ospf_ls_ack_send_list(struct ospf_interface *oi, struct list *ack,
OSPF_ISM_WRITE_ON(oi->ospf);
}
-static void ospf_ls_ack_send_event(struct thread *thread)
+static void ospf_ls_ack_send_event(struct event *thread)
{
- struct ospf_interface *oi = THREAD_ARG(thread);
+ struct ospf_interface *oi = EVENT_ARG(thread);
oi->t_ls_ack_direct = NULL;
listnode_add(oi->ls_ack_direct.ls_ack, ospf_lsa_lock(lsa));
- thread_add_event(master, ospf_ls_ack_send_event, oi, 0,
- &oi->t_ls_ack_direct);
+ event_add_event(master, ospf_ls_ack_send_event, oi, 0,
+ &oi->t_ls_ack_direct);
}
/* Send Link State Acknowledgment delayed. */
extern void ospf_fifo_flush(struct ospf_fifo *);
extern void ospf_fifo_free(struct ospf_fifo *);
-extern void ospf_read(struct thread *thread);
+extern void ospf_read(struct event *thread);
extern void ospf_hello_send(struct ospf_interface *);
extern void ospf_db_desc_send(struct ospf_neighbor *);
extern void ospf_db_desc_resend(struct ospf_neighbor *);
extern void ospf_ls_retransmit(struct ospf_interface *, struct ospf_lsa *);
extern void ospf_ls_req_event(struct ospf_neighbor *);
-extern void ospf_ls_upd_timer(struct thread *thread);
-extern void ospf_ls_ack_timer(struct thread *thread);
-extern void ospf_poll_timer(struct thread *thread);
-extern void ospf_hello_reply_timer(struct thread *thread);
+extern void ospf_ls_upd_timer(struct event *thread);
+extern void ospf_ls_ack_timer(struct event *thread);
+extern void ospf_poll_timer(struct event *thread);
+extern void ospf_hello_reply_timer(struct event *thread);
extern const struct message ospf_packet_type_str[];
extern const size_t ospf_packet_type_str_max;
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "mpls.h"
}
/* Register OSPF2-MIB. */
-static int ospf_snmp_init(struct thread_master *tm)
+static int ospf_snmp_init(struct event_loop *tm)
{
ospf_snmp_iflist = list_new();
ospf_snmp_vl_table = route_table_init();
#include <zebra.h>
#include "monotime.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "hash.h"
#include "linklist.h"
}
/* Worker for SPF calculation scheduler. */
-static void ospf_spf_calculate_schedule_worker(struct thread *thread)
+static void ospf_spf_calculate_schedule_worker(struct event *thread)
{
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
struct route_table *new_table, *new_rtrs;
struct route_table *all_rtrs = NULL;
struct timeval start_time, spf_start_time;
zlog_debug("SPF: calculation timer delay = %ld msec", delay);
ospf->t_spf_calc = NULL;
- thread_add_timer_msec(master, ospf_spf_calculate_schedule_worker, ospf,
- delay, &ospf->t_spf_calc);
+ event_add_timer_msec(master, ospf_spf_calculate_schedule_worker, ospf,
+ delay, &ospf->t_spf_calc);
}
/* Restart OSPF SPF algorithm*/
#include "sockunion.h" /* for inet_aton() */
#include "stream.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "zclient.h"
#include "sbuf.h"
*
* @return 1 on success
*/
-static void sr_start_label_manager(struct thread *start)
+static void sr_start_label_manager(struct event *start)
{
struct ospf *ospf;
- ospf = THREAD_ARG(start);
+ ospf = EVENT_ARG(start);
/* re-attempt to start SR & Label Manager connection */
ospf_sr_start(ospf);
if (!ospf_zebra_label_manager_ready())
if (ospf_zebra_label_manager_connect() < 0) {
/* Re-attempt to connect to Label Manager in 1 sec. */
- thread_add_timer(master, sr_start_label_manager, ospf,
- 1, &OspfSR.t_start_lm);
+ event_add_timer(master, sr_start_label_manager, ospf, 1,
+ &OspfSR.t_start_lm);
osr_debug(" |- Failed to start the Label Manager");
return -1;
}
osr_debug("SR (%s): Stop Segment Routing", __func__);
/* Disable any re-attempt to connect to Label Manager */
- THREAD_OFF(OspfSR.t_start_lm);
+ EVENT_OFF(OspfSR.t_start_lm);
/* Release SRGB if active */
sr_global_block_delete();
/* Stop Segment Routing */
ospf_sr_stop();
- /* Clear SR Node Table */
- if (OspfSR.neighbors)
- hash_free(OspfSR.neighbors);
-
+ hash_clean_and_free(&OspfSR.neighbors, (void *)sr_node_del);
}
/*
uint8_t msd;
/* Thread timer to start Label Manager */
- struct thread *t_start_lm;
+ struct event *t_start_lm;
};
/* Structure aggregating all received SR info from LSAs by node */
#include "vty.h"
#include "stream.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "hash.h"
#include "sockunion.h" /* for inet_aton() */
#include "network.h"
#include "printfrr.h"
#include "monotime.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "table.h"
#include "vty.h"
for (ALL_LIST_ELEMENTS_RO(ospf->areas, ln, area)) {
SET_FLAG(area->stub_router_state,
OSPF_AREA_WAS_START_STUB_ROUTED);
- THREAD_OFF(area->t_stub_router);
+ EVENT_OFF(area->t_stub_router);
/* Don't trample on admin stub routed */
if (!CHECK_FLAG(area->stub_router_state,
.actual_grace_period);
vty_out(vty,
" Remaining GraceTime:%ld(in seconds).\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
nbr->gr_helper_info
- .t_grace_timer));
+ .t_grace_timer));
vty_out(vty,
" Graceful Restart reason: %s.\n\n",
ospf_restart_reason2str(
.actual_grace_period);
json_object_int_add(
json_neigh, "remainGracetime",
- thread_timer_remain_second(
+ event_timer_remain_second(
nbr->gr_helper_info
- .t_grace_timer));
+ .t_grace_timer));
json_object_string_add(
json_neigh, "restartReason",
ospf_restart_reason2str(
else
ospf->maxage_delay = value;
- THREAD_OFF(ospf->t_maxage);
+ EVENT_OFF(ospf->t_maxage);
OSPF_TIMER_ON(ospf->t_maxage, ospf_maxage_lsa_remover,
ospf->maxage_delay);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
static struct zclient *zclient_sync;
/* For registering threads. */
-extern struct thread_master *master;
+extern struct event_loop *master;
/* Router-id update message from zebra. */
static int ospf_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
* Function to originate or flush default after applying
* route-map on all ei.
*/
-static void ospf_external_lsa_default_routemap_timer(struct thread *thread)
+static void ospf_external_lsa_default_routemap_timer(struct event *thread)
{
struct list *ext_list;
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
struct prefix_ipv4 p;
int type;
int ret = 0;
/*
* Check if default needs to be flushed too.
*/
- thread_add_event(master, ospf_external_lsa_default_routemap_timer, ospf,
- 0, &ospf->t_default_routemap_timer);
+ event_add_event(master, ospf_external_lsa_default_routemap_timer, ospf,
+ 0, &ospf->t_default_routemap_timer);
}
/* Update NHLFE for Prefix SID */
* there are any other external info which can still trigger
* default route origination else flush it.
*/
- thread_add_event(master,
- ospf_external_lsa_default_routemap_timer, ospf,
- 0, &ospf->t_default_routemap_timer);
+ event_add_event(master,
+ ospf_external_lsa_default_routemap_timer, ospf,
+ 0, &ospf->t_default_routemap_timer);
}
return true;
}
/* distribute-list update timer. */
-static void ospf_distribute_list_update_timer(struct thread *thread)
+static void ospf_distribute_list_update_timer(struct event *thread)
{
struct route_node *rn;
struct external_info *ei;
struct route_table *rt;
struct ospf_lsa *lsa;
int type, default_refresh = 0;
- struct ospf *ospf = THREAD_ARG(thread);
+ struct ospf *ospf = EVENT_ARG(thread);
if (ospf == NULL)
return;
return;
/* Set timer. If timer is already started, this call does nothing. */
- thread_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
- ospf->min_ls_interval,
- &ospf->t_distribute_update);
+ event_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
+ ospf->min_ls_interval, &ospf->t_distribute_update);
}
/* If access-list is updated, apply some check. */
[ZEBRA_CLIENT_CLOSE_NOTIFY] = ospf_zebra_client_close_notify,
};
-void ospf_zebra_init(struct thread_master *master, unsigned short instance)
+void ospf_zebra_init(struct event_loop *master, unsigned short instance)
{
/* Allocate zebra structure. */
zclient = zclient_new(master, &zclient_options_default, ospf_handlers,
const char *, const char *);
extern int ospf_distance_unset(struct vty *, struct ospf *, const char *,
const char *, const char *);
-extern void ospf_zebra_init(struct thread_master *, unsigned short);
+extern void ospf_zebra_init(struct event_loop *m, unsigned short instance);
extern void ospf_zebra_vrf_register(struct ospf *ospf);
extern void ospf_zebra_vrf_deregister(struct ospf *ospf);
bool ospf_external_default_routemap_apply_walk(
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "linklist.h"
new->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT;
new->maxage_lsa = route_table_init();
new->t_maxage_walker = NULL;
- thread_add_timer(master, ospf_lsa_maxage_walker, new,
- OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
+ event_add_timer(master, ospf_lsa_maxage_walker, new,
+ OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
/* Max paths initialization */
new->max_multipath = MULTIPATH_NUM;
new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
new->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
new->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, new,
- new->lsa_refresh_interval, &new->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, new,
+ new->lsa_refresh_interval, &new->t_lsa_refresher);
new->lsa_refresher_started = monotime(NULL);
new->ibuf = stream_new(OSPF_MAX_PACKET_SIZE + 1);
return new;
}
- thread_add_read(master, ospf_read, new, new->fd, &new->t_read);
+ event_add_read(master, ospf_read, new, new->fd, &new->t_read);
new->oi_running = 1;
ospf_router_id_update(new);
static void ospf_deferred_shutdown_finish(struct ospf *ospf)
{
ospf->stub_router_shutdown_time = OSPF_STUB_ROUTER_UNCONFIGURED;
- THREAD_OFF(ospf->t_deferred_shutdown);
+ EVENT_OFF(ospf->t_deferred_shutdown);
ospf_finish_final(ospf);
}
/* Timer thread for G-R */
-static void ospf_deferred_shutdown_timer(struct thread *t)
+static void ospf_deferred_shutdown_timer(struct event *t)
{
- struct ospf *ospf = THREAD_ARG(t);
+ struct ospf *ospf = EVENT_ARG(t);
ospf_deferred_shutdown_finish(ospf);
}
/* Clear static neighbors */
for (rn = route_top(ospf->nbr_nbma); rn; rn = route_next(rn))
if ((nbr_nbma = rn->info)) {
- THREAD_OFF(nbr_nbma->t_poll);
+ EVENT_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
}
/* Cancel all timers. */
- THREAD_OFF(ospf->t_read);
- THREAD_OFF(ospf->t_write);
- THREAD_OFF(ospf->t_spf_calc);
- THREAD_OFF(ospf->t_ase_calc);
- THREAD_OFF(ospf->t_maxage);
- THREAD_OFF(ospf->t_maxage_walker);
- THREAD_OFF(ospf->t_abr_task);
- THREAD_OFF(ospf->t_abr_fr);
- THREAD_OFF(ospf->t_asbr_check);
- THREAD_OFF(ospf->t_asbr_nssa_redist_update);
- THREAD_OFF(ospf->t_distribute_update);
- THREAD_OFF(ospf->t_lsa_refresher);
- THREAD_OFF(ospf->t_opaque_lsa_self);
- THREAD_OFF(ospf->t_sr_update);
- THREAD_OFF(ospf->t_default_routemap_timer);
- THREAD_OFF(ospf->t_external_aggr);
- THREAD_OFF(ospf->gr_info.t_grace_period);
+ EVENT_OFF(ospf->t_read);
+ EVENT_OFF(ospf->t_write);
+ EVENT_OFF(ospf->t_spf_calc);
+ EVENT_OFF(ospf->t_ase_calc);
+ EVENT_OFF(ospf->t_maxage);
+ EVENT_OFF(ospf->t_maxage_walker);
+ EVENT_OFF(ospf->t_abr_task);
+ EVENT_OFF(ospf->t_abr_fr);
+ EVENT_OFF(ospf->t_asbr_check);
+ EVENT_OFF(ospf->t_asbr_nssa_redist_update);
+ EVENT_OFF(ospf->t_distribute_update);
+ EVENT_OFF(ospf->t_lsa_refresher);
+ EVENT_OFF(ospf->t_opaque_lsa_self);
+ EVENT_OFF(ospf->t_sr_update);
+ EVENT_OFF(ospf->t_default_routemap_timer);
+ EVENT_OFF(ospf->t_external_aggr);
+ EVENT_OFF(ospf->gr_info.t_grace_period);
LSDB_LOOP (OPAQUE_AS_LSDB(ospf), rn, lsa)
ospf_discard_from_db(ospf, ospf->lsdb, lsa);
free(IMPORT_NAME(area));
/* Cancel timer. */
- THREAD_OFF(area->t_stub_router);
- THREAD_OFF(area->t_opaque_lsa_self);
+ EVENT_OFF(area->t_stub_router);
+ EVENT_OFF(area->t_opaque_lsa_self);
if (OSPF_IS_AREA_BACKBONE(area))
area->ospf->backbone = NULL;
}
/* remove update event */
- THREAD_OFF(oi->t_ls_upd_event);
+ EVENT_OFF(oi->t_ls_upd_event);
}
void ospf_if_update(struct ospf *ospf, struct interface *ifp)
- (monotime(NULL) - ospf->lsa_refresher_started);
if (time_left > interval) {
- THREAD_OFF(ospf->t_lsa_refresher);
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- interval, &ospf->t_lsa_refresher);
+ EVENT_OFF(ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf, interval,
+ &ospf->t_lsa_refresher);
}
ospf->lsa_refresh_interval = interval;
- (monotime(NULL) - ospf->lsa_refresher_started);
if (time_left > OSPF_LSA_REFRESH_INTERVAL_DEFAULT) {
- THREAD_OFF(ospf->t_lsa_refresher);
+ EVENT_OFF(ospf->t_lsa_refresher);
ospf->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
- &ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf,
+ OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
+ &ospf->t_lsa_refresher);
}
ospf->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
static void ospf_nbr_nbma_down(struct ospf_nbr_nbma *nbr_nbma)
{
- THREAD_OFF(nbr_nbma->t_poll);
+ EVENT_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
if (nbr_nbma->v_poll != interval) {
nbr_nbma->v_poll = interval;
if (nbr_nbma->oi && ospf_if_is_up(nbr_nbma->oi)) {
- THREAD_OFF(nbr_nbma->t_poll);
+ EVENT_OFF(nbr_nbma->t_poll);
OSPF_POLL_TIMER_ON(nbr_nbma->t_poll, ospf_poll_timer,
nbr_nbma->v_poll);
}
return 1;
}
-void ospf_master_init(struct thread_master *master)
+void ospf_master_init(struct event_loop *master)
{
memset(&ospf_master, 0, sizeof(ospf_master));
ret = ospf_sock_init(ospf);
if (ret < 0 || ospf->fd <= 0)
return 0;
- thread_add_read(master, ospf_read, ospf, ospf->fd,
- &ospf->t_read);
+ event_add_read(master, ospf_read, ospf, ospf->fd,
+ &ospf->t_read);
ospf->oi_running = 1;
ospf_router_id_update(ospf);
}
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("%s: ospf old_vrf_id %d unlinked", __func__,
old_vrf_id);
- THREAD_OFF(ospf->t_read);
+ EVENT_OFF(ospf->t_read);
close(ospf->fd);
ospf->fd = -1;
}
struct list *ospf;
/* OSPF thread master. */
- struct thread_master *master;
+ struct event_loop *master;
/* Various OSPF global configuration. */
uint8_t options;
bool prepare_in_progress;
bool finishing_restart;
uint32_t grace_period;
- struct thread *t_grace_period;
+ struct event *t_grace_period;
};
/* OSPF instance structure. */
int redistribute; /* Num of redistributed protocols. */
/* Threads. */
- struct thread *t_abr_task; /* ABR task timer. */
- struct thread *t_abr_fr; /* ABR FR timer. */
- struct thread *t_asbr_check; /* ASBR check timer. */
- struct thread *t_asbr_nssa_redist_update; /* ASBR NSSA redistribution
+ struct event *t_abr_task; /* ABR task timer. */
+ struct event *t_abr_fr; /* ABR FR timer. */
+ struct event *t_asbr_check; /* ASBR check timer. */
+ struct event *t_asbr_nssa_redist_update; /* ASBR NSSA redistribution
update timer. */
- struct thread *t_distribute_update; /* Distirbute list update timer. */
- struct thread *t_spf_calc; /* SPF calculation timer. */
- struct thread *t_ase_calc; /* ASE calculation timer. */
- struct thread
- *t_opaque_lsa_self; /* Type-11 Opaque-LSAs origin event. */
- struct thread *t_sr_update; /* Segment Routing update timer */
+ struct event *t_distribute_update; /* Distribute list update timer. */
+ struct event *t_spf_calc; /* SPF calculation timer. */
+ struct event *t_ase_calc; /* ASE calculation timer. */
+ struct event *t_opaque_lsa_self; /* Type-11 Opaque-LSAs origin event. */
+ struct event *t_sr_update; /* Segment Routing update timer */
unsigned int maxage_delay; /* Delay on Maxage remover timer, sec */
- struct thread *t_maxage; /* MaxAge LSA remover timer. */
- struct thread *t_maxage_walker; /* MaxAge LSA checking timer. */
+ struct event *t_maxage; /* MaxAge LSA remover timer. */
+ struct event *t_maxage_walker; /* MaxAge LSA checking timer. */
- struct thread
+ struct event
*t_deferred_shutdown; /* deferred/stub-router shutdown timer*/
- struct thread *t_write;
+ struct event *t_write;
#define OSPF_WRITE_INTERFACE_COUNT_DEFAULT 20
- struct thread *t_default_routemap_timer;
+ struct event *t_default_routemap_timer;
int write_oi_count; /* Num of packets sent per thread invocation */
- struct thread *t_read;
+ struct event *t_read;
int fd;
struct stream *ibuf;
struct list *oi_write_q;
struct list *qs[OSPF_LSA_REFRESHER_SLOTS];
} lsa_refresh_queue;
- struct thread *t_lsa_refresher;
+ struct event *t_lsa_refresher;
time_t lsa_refresher_started;
#define OSPF_LSA_REFRESH_INTERVAL_DEFAULT 10
uint16_t lsa_refresh_interval;
/* delay timer to process external routes
* with summary address.
*/
- struct thread *t_external_aggr;
+ struct event *t_external_aggr;
/* delay interval in seconds */
uint16_t aggr_delay_interval;
struct p_spaces_head *p_spaces;
/* Threads. */
- struct thread *t_stub_router; /* Stub-router timer */
- struct thread *t_opaque_lsa_self; /* Type-10 Opaque-LSAs origin. */
+ struct event *t_stub_router; /* Stub-router timer */
+ struct event *t_opaque_lsa_self; /* Type-10 Opaque-LSAs origin. */
/* Statistics field. */
uint32_t spf_calculation; /* SPF Calculation Count. */
uint32_t v_poll;
/* Poll timer thread. */
- struct thread *t_poll;
+ struct event *t_poll;
/* State change. */
uint32_t state_change;
#define LSA_OPTIONS_NSSA_GET(area) \
(((area)->external_routing == OSPF_AREA_NSSA) ? OSPF_OPTION_NP : 0)
-#define OSPF_TIMER_ON(T,F,V) thread_add_timer (master,(F),ospf,(V),&(T))
-#define OSPF_AREA_TIMER_ON(T,F,V) thread_add_timer (master, (F), area, (V), &(T))
-#define OSPF_POLL_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr_nbma, (V), &(T))
+#define OSPF_TIMER_ON(T, F, V) event_add_timer(master, (F), ospf, (V), &(T))
+#define OSPF_AREA_TIMER_ON(T, F, V) \
+ event_add_timer(master, (F), area, (V), &(T))
+#define OSPF_POLL_TIMER_ON(T, F, V) \
+ event_add_timer(master, (F), nbr_nbma, (V), &(T))
/* Extern variables. */
extern struct ospf_master *om;
extern unsigned short ospf_instance;
extern const int ospf_redistributed_proto_max;
extern struct zclient *zclient;
-extern struct thread_master *master;
+extern struct event_loop *master;
extern int ospf_zlog;
extern struct zebra_privs_t ospfd_privs;
extern void ospf_route_map_init(void);
-extern void ospf_master_init(struct thread_master *master);
+extern void ospf_master_init(struct event_loop *master);
extern void ospf_vrf_init(void);
extern void ospf_vrf_terminate(void);
extern void ospf_vrf_link(struct ospf *ospf, struct vrf *vrf);
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "log.h"
#include "memory.h"
struct option longopts[] = {{0}};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
static struct frr_daemon_info pathd_di;
/* Module Functions */
static int pcep_module_finish(void);
-static int pcep_module_late_init(struct thread_master *tm);
+static int pcep_module_late_init(struct event_loop *tm);
static int pcep_module_init(void);
/* ------------ Path Helper Functions ------------ */
* run before config load, so the CLI commands don't try to touch things that
* aren't set up yet...
*/
-static int pcep_module_config_pre(struct thread_master *tm)
+static int pcep_module_config_pre(struct event_loop *tm)
{
assert(pcep_g->fpt == NULL);
assert(pcep_g->master == NULL);
return 0;
}
-static int pcep_module_late_init(struct thread_master *tm)
+static int pcep_module_late_init(struct event_loop *tm)
{
hook_register(pathd_candidate_created, pathd_candidate_created_handler);
hook_register(pathd_candidate_updated, pathd_candidate_updated_handler);
struct pcep_glob {
struct debug dbg;
- struct thread_master *master;
+ struct event_loop *master;
struct frr_pthread *fpt;
uint8_t num_pce_opts_cli;
struct pce_opts_cli *pce_opts_cli[MAX_PCE];
#include "pathd/path_pcep.h"
#include "pathd/path_pcep_config.h"
#include "pathd/path_pcep_debug.h"
-#include "thread.h"
+#include "frrevent.h"
#define MAX_XPATH 256
#define MAX_FLOAT_LEN 22
/* Internal Functions Called From Main Thread */
static int pcep_ctrl_halt_cb(struct frr_pthread *fpt, void **res);
-static void pcep_refine_path_event_cb(struct thread *thread);
+static void pcep_refine_path_event_cb(struct event *thread);
/* Internal Functions Called From Controller Thread */
-static void pcep_thread_finish_event_handler(struct thread *thread);
+static void pcep_thread_finish_event_handler(struct event *thread);
/* Controller Thread Timer Handler */
static int schedule_thread_timer(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_timer_type timer_type,
enum pcep_ctrl_timeout_type timeout_type,
uint32_t delay, void *payload,
- struct thread **thread);
+ struct event **thread);
static int schedule_thread_timer_with_cb(
struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_timer_type timer_type,
enum pcep_ctrl_timeout_type timeout_type, uint32_t delay, void *payload,
- struct thread **thread, pcep_ctrl_thread_callback timer_cb);
-static void pcep_thread_timer_handler(struct thread *thread);
+ struct event **thread, pcep_ctrl_thread_callback timer_cb);
+static void pcep_thread_timer_handler(struct event *thread);
/* Controller Thread Socket read/write Handler */
static int schedule_thread_socket(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_socket_type type, bool is_read,
- void *payload, int fd, struct thread **thread,
+ void *payload, int fd, struct event **thread,
pcep_ctrl_thread_callback cb);
/* Controller Thread Event Handler */
enum pcep_ctrl_event_type type,
uint32_t sub_type, void *payload,
pcep_ctrl_thread_callback event_cb);
-static void pcep_thread_event_handler(struct thread *thread);
+static void pcep_thread_event_handler(struct event *thread);
static int pcep_thread_event_update_pcc_options(struct ctrl_state *ctrl_state,
struct pcc_opts *opts);
static int pcep_thread_event_update_pce_options(struct ctrl_state *ctrl_state,
/* Main Thread Event Handler */
static int send_to_main(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_main_event_type type, void *payload);
-static void pcep_main_event_handler(struct thread *thread);
+static void pcep_main_event_handler(struct event *thread);
/* Helper functions */
static void set_ctrl_state(struct frr_pthread *fpt,
/* ------------ API Functions Called from Main Thread ------------ */
-int pcep_ctrl_initialize(struct thread_master *main_thread,
+int pcep_ctrl_initialize(struct event_loop *main_thread,
struct frr_pthread **fpt,
pcep_main_event_handler_t event_handler)
{
int pcep_ctrl_halt_cb(struct frr_pthread *fpt, void **res)
{
- thread_add_event(fpt->master, pcep_thread_finish_event_handler,
- (void *)fpt, 0, NULL);
+ event_add_event(fpt->master, pcep_thread_finish_event_handler,
+ (void *)fpt, 0, NULL);
pthread_join(fpt->thread, res);
return 0;
}
-void pcep_refine_path_event_cb(struct thread *thread)
+void pcep_refine_path_event_cb(struct event *thread)
{
- struct pcep_refine_path_event_data *data = THREAD_ARG(thread);
+ struct pcep_refine_path_event_data *data = EVENT_ARG(thread);
assert(data != NULL);
struct ctrl_state *ctrl_state = data->ctrl_state;
struct path *path = data->path;
void pcep_thread_schedule_sync_best_pce(struct ctrl_state *ctrl_state,
int pcc_id, int delay,
- struct thread **thread)
+ struct event **thread)
{
schedule_thread_timer(ctrl_state, pcc_id, TM_CALCULATE_BEST_PCE,
TO_UNDEFINED, delay, NULL, thread);
}
-void pcep_thread_cancel_timer(struct thread **thread)
+void pcep_thread_cancel_timer(struct event **thread)
{
if (thread == NULL || *thread == NULL) {
return;
}
- struct pcep_ctrl_timer_data *data = THREAD_ARG(*thread);
+ struct pcep_ctrl_timer_data *data = EVENT_ARG(*thread);
PCEP_DEBUG("Timer %s / %s canceled", timer_type_name(data->timer_type),
timeout_type_name(data->timeout_type));
if (data != NULL) {
}
if ((*thread)->master->owner == pthread_self()) {
- thread_cancel(thread);
+ event_cancel(thread);
} else {
- thread_cancel_async((*thread)->master, thread, NULL);
+ event_cancel_async((*thread)->master, thread, NULL);
}
}
void pcep_thread_schedule_reconnect(struct ctrl_state *ctrl_state, int pcc_id,
- int retry_count, struct thread **thread)
+ int retry_count, struct event **thread)
{
uint32_t delay = backoff_delay(MAX_RECONNECT_DELAY, 1, retry_count);
PCEP_DEBUG("Schedule RECONNECT_PCC for %us (retry %u)", delay,
void pcep_thread_schedule_timeout(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_timeout_type timeout_type,
uint32_t delay, void *param,
- struct thread **thread)
+ struct event **thread)
{
assert(timeout_type > TO_UNDEFINED);
assert(timeout_type < TO_MAX);
void pcep_thread_schedule_pceplib_timer(struct ctrl_state *ctrl_state,
int delay, void *payload,
- struct thread **thread,
+ struct event **thread,
pcep_ctrl_thread_callback timer_cb)
{
PCEP_DEBUG("Schedule PCEPLIB_TIMER for %us", delay);
void pcep_thread_schedule_session_timeout(struct ctrl_state *ctrl_state,
int pcc_id, int delay,
- struct thread **thread)
+ struct event **thread)
{
PCEP_DEBUG("Schedule session_timeout interval for %us", delay);
schedule_thread_timer(ctrl_state, pcc_id, TM_SESSION_TIMEOUT_PCC,
data->continue_lsp_update_handler = cb;
data->payload = payload;
- thread_add_event(ctrl_state->main, pcep_refine_path_event_cb,
- (void *)data, 0, NULL);
+ event_add_event(ctrl_state->main, pcep_refine_path_event_cb,
+ (void *)data, 0, NULL);
return 0;
}
/* ------------ Internal Functions Called From Controller Thread ------------ */
-void pcep_thread_finish_event_handler(struct thread *thread)
+void pcep_thread_finish_event_handler(struct event *thread)
{
int i;
- struct frr_pthread *fpt = THREAD_ARG(thread);
+ struct frr_pthread *fpt = EVENT_ARG(thread);
struct ctrl_state *ctrl_state = fpt->data;
assert(ctrl_state != NULL);
enum pcep_ctrl_timer_type timer_type,
enum pcep_ctrl_timeout_type timeout_type,
uint32_t delay, void *payload,
- struct thread **thread,
+ struct event **thread,
pcep_ctrl_thread_callback timer_cb)
{
assert(thread != NULL);
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_timer(ctrl_state->self, timer_cb, (void *)data, delay,
- thread);
+ event_add_timer(ctrl_state->self, timer_cb, (void *)data, delay,
+ thread);
return 0;
}
int schedule_thread_timer(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_timer_type timer_type,
enum pcep_ctrl_timeout_type timeout_type,
- uint32_t delay, void *payload, struct thread **thread)
+ uint32_t delay, void *payload, struct event **thread)
{
return schedule_thread_timer_with_cb(ctrl_state, pcc_id, timer_type,
timeout_type, delay, payload,
thread, pcep_thread_timer_handler);
}
-void pcep_thread_timer_handler(struct thread *thread)
+void pcep_thread_timer_handler(struct event *thread)
{
/* data unpacking */
- struct pcep_ctrl_timer_data *data = THREAD_ARG(thread);
+ struct pcep_ctrl_timer_data *data = EVENT_ARG(thread);
assert(data != NULL);
struct ctrl_state *ctrl_state = data->ctrl_state;
assert(ctrl_state != NULL);
}
}
-void pcep_thread_pcep_event(struct thread *thread)
+void pcep_thread_pcep_event(struct event *thread)
{
- struct pcep_ctrl_event_data *data = THREAD_ARG(thread);
+ struct pcep_ctrl_event_data *data = EVENT_ARG(thread);
assert(data != NULL);
struct ctrl_state *ctrl_state = data->ctrl_state;
pcep_event *event = data->payload;
int schedule_thread_socket(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_socket_type type, bool is_read,
- void *payload, int fd, struct thread **thread,
+ void *payload, int fd, struct event **thread,
pcep_ctrl_thread_callback socket_cb)
{
assert(thread != NULL);
data->payload = payload;
if (is_read) {
- thread_add_read(ctrl_state->self, socket_cb, (void *)data, fd,
- thread);
+ event_add_read(ctrl_state->self, socket_cb, (void *)data, fd,
+ thread);
} else {
- thread_add_write(ctrl_state->self, socket_cb, (void *)data, fd,
- thread);
+ event_add_write(ctrl_state->self, socket_cb, (void *)data, fd,
+ thread);
}
return 0;
struct ctrl_state *ctrl_state = ((struct frr_pthread *)fpt)->data;
return schedule_thread_socket(ctrl_state, 0, SOCK_PCEPLIB, false,
- payload, fd, (struct thread **)thread,
+ payload, fd, (struct event **)thread,
socket_cb);
}
struct ctrl_state *ctrl_state = ((struct frr_pthread *)fpt)->data;
return schedule_thread_socket(ctrl_state, 0, SOCK_PCEPLIB, true,
- payload, fd, (struct thread **)thread,
+ payload, fd, (struct event **)thread,
socket_cb);
}
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_event(ctrl_state->self, event_cb, (void *)data, 0, NULL);
+ event_add_event(ctrl_state->self, event_cb, (void *)data, 0, NULL);
return 0;
}
-void pcep_thread_event_handler(struct thread *thread)
+void pcep_thread_event_handler(struct event *thread)
{
/* data unpacking */
- struct pcep_ctrl_event_data *data = THREAD_ARG(thread);
+ struct pcep_ctrl_event_data *data = EVENT_ARG(thread);
assert(data != NULL);
struct ctrl_state *ctrl_state = data->ctrl_state;
assert(ctrl_state != NULL);
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_event(ctrl_state->main, pcep_main_event_handler,
- (void *)data, 0, NULL);
+ event_add_event(ctrl_state->main, pcep_main_event_handler, (void *)data,
+ 0, NULL);
return 0;
}
-void pcep_main_event_handler(struct thread *thread)
+void pcep_main_event_handler(struct event *thread)
{
/* data unpacking */
- struct pcep_main_event_data *data = THREAD_ARG(thread);
+ struct pcep_main_event_data *data = EVENT_ARG(thread);
assert(data != NULL);
pcep_main_event_handler_t handler = data->handler;
enum pcep_main_event_type type = data->type;
};
struct ctrl_state {
- struct thread_master *main;
- struct thread_master *self;
+ struct event_loop *main;
+ struct event_loop *self;
pcep_main_event_handler_t main_event_handler;
struct pcc_opts *pcc_opts;
int pcc_count;
void *payload;
};
-typedef void (*pcep_ctrl_thread_callback)(struct thread *);
+typedef void (*pcep_ctrl_thread_callback)(struct event *);
/* PCC connection information, populated in a thread-safe
* manner with pcep_ctrl_get_pcc_info() */
};
/* Functions called from the main thread */
-int pcep_ctrl_initialize(struct thread_master *main_thread,
+int pcep_ctrl_initialize(struct event_loop *main_thread,
struct frr_pthread **fpt,
pcep_main_event_handler_t event_handler);
int pcep_ctrl_finalize(struct frr_pthread **fpt);
struct path *path);
void pcep_thread_initiate_path(struct ctrl_state *ctrl_state, int pcc_id,
struct path *path);
-void pcep_thread_cancel_timer(struct thread **thread);
+void pcep_thread_cancel_timer(struct event **thread);
void pcep_thread_schedule_reconnect(struct ctrl_state *ctrl_state, int pcc_id,
- int retry_count, struct thread **thread);
+ int retry_count, struct event **thread);
void pcep_thread_schedule_timeout(struct ctrl_state *ctrl_state, int pcc_id,
enum pcep_ctrl_timeout_type type,
uint32_t delay, void *param,
- struct thread **thread);
+ struct event **thread);
void pcep_thread_schedule_session_timeout(struct ctrl_state *ctrl_state,
int pcc_id, int delay,
- struct thread **thread);
+ struct event **thread);
void pcep_thread_remove_candidate_path_segments(struct ctrl_state *ctrl_state,
struct pcc_state *pcc_state);
void pcep_thread_schedule_sync_best_pce(struct ctrl_state *ctrl_state,
int pcc_id, int delay,
- struct thread **thread);
+ struct event **thread);
void pcep_thread_schedule_pceplib_timer(struct ctrl_state *ctrl_state,
int delay, void *payload,
- struct thread **thread,
+ struct event **thread,
pcep_ctrl_thread_callback cb);
int pcep_thread_socket_read(void *fpt, void **thread, int fd, void *payload,
pcep_ctrl_thread_callback cb);
int pcep_thread_send_ctrl_event(void *fpt, void *payload,
pcep_ctrl_thread_callback cb);
-void pcep_thread_pcep_event(struct thread *thread);
+void pcep_thread_pcep_event(struct event *thread);
int pcep_thread_pcc_count(struct ctrl_state *ctrl_state);
/* Called by the PCC to refine a path in the main thread */
int pcep_thread_refine_path(struct ctrl_state *ctrl_state, int pcc_id,
void *payload);
static int pcep_lib_pceplib_socket_write_cb(void *fpt, void **thread, int fd,
void *payload);
-static void pcep_lib_socket_read_ready(struct thread *thread);
-static void pcep_lib_socket_write_ready(struct thread *thread);
+static void pcep_lib_socket_read_ready(struct event *thread);
+static void pcep_lib_socket_write_ready(struct event *thread);
/* pceplib pcep_event callbacks */
static void pcep_lib_pceplib_event_cb(void *fpt, pcep_event *event);
/* Callbacks called by path_pcep_controller when a socket is ready to read/write
*/
-void pcep_lib_socket_write_ready(struct thread *thread)
+void pcep_lib_socket_write_ready(struct event *thread)
{
- struct pcep_ctrl_socket_data *data = THREAD_ARG(thread);
+ struct pcep_ctrl_socket_data *data = EVENT_ARG(thread);
assert(data != NULL);
pceplib_external_socket_write(data->fd, data->payload);
XFREE(MTYPE_PCEP, data);
}
-void pcep_lib_socket_read_ready(struct thread *thread)
+void pcep_lib_socket_read_ready(struct event *thread)
{
- struct pcep_ctrl_socket_data *data = THREAD_ARG(thread);
+ struct pcep_ctrl_socket_data *data = EVENT_ARG(thread);
assert(data != NULL);
pceplib_external_socket_read(data->fd, data->payload);
}
if (pcc_state->t_reconnect != NULL) {
- thread_cancel(&pcc_state->t_reconnect);
+ event_cancel(&pcc_state->t_reconnect);
pcc_state->t_reconnect = NULL;
}
if (pcc_state->t_update_best != NULL) {
- thread_cancel(&pcc_state->t_update_best);
+ event_cancel(&pcc_state->t_update_best);
pcc_state->t_update_best = NULL;
}
if (pcc_state->t_session_timeout != NULL) {
- thread_cancel(&pcc_state->t_session_timeout);
+ event_cancel(&pcc_state->t_session_timeout);
pcc_state->t_session_timeout = NULL;
}
assert(pcc_state->sess == NULL);
if (pcc_state->t_reconnect != NULL) {
- thread_cancel(&pcc_state->t_reconnect);
+ event_cancel(&pcc_state->t_reconnect);
pcc_state->t_reconnect = NULL;
}
// In case some best pce alternative were waiting to activate
if (pcc_state->t_update_best != NULL) {
- thread_cancel(&pcc_state->t_update_best);
+ event_cancel(&pcc_state->t_update_best);
pcc_state->t_update_best = NULL;
}
struct req_entry {
RB_ENTRY(req_entry) entry;
- struct thread *t_retry;
+ struct event *t_retry;
int retry_count;
bool was_sent;
struct path *path;
pcep_session *sess;
uint32_t retry_count;
bool synchronized;
- struct thread *t_reconnect;
- struct thread *t_update_best;
- struct thread *t_session_timeout;
+ struct event *t_reconnect;
+ struct event *t_update_best;
+ struct event *t_session_timeout;
uint32_t next_reqid;
uint32_t next_plspid;
struct plspid_map_head plspid_map;
static uint32_t path_ted_start_importing_igp(const char *daemon_str);
static uint32_t path_ted_stop_importing_igp(void);
static enum zclient_send_status path_ted_link_state_sync(void);
-static void path_ted_timer_handler_sync(struct thread *thread);
-static void path_ted_timer_handler_refresh(struct thread *thread);
+static void path_ted_timer_handler_sync(struct event *thread);
+static void path_ted_timer_handler_refresh(struct event *thread);
static int path_ted_cli_debug_config_write(struct vty *vty);
static int path_ted_cli_debug_set_all(uint32_t flags, bool set);
* path_path_ted public API function implementations
*/
-void path_ted_init(struct thread_master *master)
+void path_ted_init(struct event_loop *master)
{
ted_state_g.main = master;
ted_state_g.link_state_delay_interval = TIMER_RETRY_DELAY;
PATH_TED_DEBUG("%s: PATHD-TED: Opaque asked for TED sync ",
__func__);
}
- thread_add_timer(ted_state_g.main, path_ted_timer_handler_sync,
- &ted_state_g, ted_state_g.link_state_delay_interval,
- &ted_state_g.t_link_state_sync);
+ event_add_timer(ted_state_g.main, path_ted_timer_handler_sync,
+ &ted_state_g, ted_state_g.link_state_delay_interval,
+ &ted_state_g.t_link_state_sync);
return status;
}
*
* @return status
*/
-void path_ted_timer_handler_sync(struct thread *thread)
+void path_ted_timer_handler_sync(struct event *thread)
{
/* data unpacking */
- struct ted_state *data = THREAD_ARG(thread);
+ struct ted_state *data = EVENT_ARG(thread);
assert(data != NULL);
/* Retry the sync */
int status = 0;
path_ted_timer_refresh_cancel();
- thread_add_timer(ted_state_g.main, path_ted_timer_handler_refresh,
- &ted_state_g,
- ted_state_g.segment_list_refresh_interval,
- &ted_state_g.t_segment_list_refresh);
+ event_add_timer(ted_state_g.main, path_ted_timer_handler_refresh,
+ &ted_state_g, ted_state_g.segment_list_refresh_interval,
+ &ted_state_g.t_segment_list_refresh);
return status;
}
*
* @return status
*/
-void path_ted_timer_handler_refresh(struct thread *thread)
+void path_ted_timer_handler_refresh(struct event *thread)
{
if (!path_ted_is_initialized())
return;
PATH_TED_DEBUG("%s: PATHD-TED: Refresh sid from current TED", __func__);
/* data unpacking */
- struct ted_state *data = THREAD_ARG(thread);
+ struct ted_state *data = EVENT_ARG(thread);
assert(data != NULL);
void path_ted_timer_sync_cancel(void)
{
if (ted_state_g.t_link_state_sync != NULL) {
- thread_cancel(&ted_state_g.t_link_state_sync);
+ event_cancel(&ted_state_g.t_link_state_sync);
ted_state_g.t_link_state_sync = NULL;
}
}
void path_ted_timer_refresh_cancel(void)
{
if (ted_state_g.t_segment_list_refresh != NULL) {
- thread_cancel(&ted_state_g.t_segment_list_refresh);
+ event_cancel(&ted_state_g.t_segment_list_refresh);
ted_state_g.t_segment_list_refresh = NULL;
}
}
IMPORT_OSPFv3
};
struct ted_state {
- struct thread_master *main;
+ struct event_loop *main;
/* Status of TED: enable or disable */
bool enabled;
/* From which igp is going to receive data */
/* The TED itself as in link_state.h */
struct ls_ted *ted;
/* Timer for ted sync */
- struct thread *t_link_state_sync;
+ struct event *t_link_state_sync;
/* Timer for refresh sid in segment list */
- struct thread *t_segment_list_refresh;
+ struct event *t_segment_list_refresh;
/* delay interval in seconds */
uint32_t link_state_delay_interval;
/* delay interval refresh in seconds */
/* TED management functions */
bool path_ted_is_initialized(void);
-void path_ted_init(struct thread_master *master);
+void path_ted_init(struct event_loop *master);
uint32_t path_ted_teardown(void);
void path_ted_timer_sync_cancel(void);
void path_ted_timer_refresh_cancel(void);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "lib_errors.h"
#include "if.h"
*
* @param master The master thread
*/
-void path_zebra_init(struct thread_master *master)
+void path_zebra_init(struct event_loop *master)
{
struct zclient_options options = zclient_options_default;
options.synchronous = true;
void path_zebra_delete_sr_policy(struct srte_policy *policy);
int path_zebra_request_label(mpls_label_t label);
void path_zebra_release_label(mpls_label_t label);
-void path_zebra_init(struct thread_master *master);
+void path_zebra_init(struct event_loop *master);
void path_zebra_stop(void);
#endif /* _FRR_PATH_MPLS_H_ */
static void trigger_pathd_candidate_created(struct srte_candidate *candidate);
-static void trigger_pathd_candidate_created_timer(struct thread *thread);
+static void trigger_pathd_candidate_created_timer(struct event *thread);
static void trigger_pathd_candidate_updated(struct srte_candidate *candidate);
-static void trigger_pathd_candidate_updated_timer(struct thread *thread);
+static void trigger_pathd_candidate_updated_timer(struct event *thread);
static void trigger_pathd_candidate_removed(struct srte_candidate *candidate);
static const char *
srte_candidate_metric_name(enum srte_candidate_metric_type type);
from changing the candidate by hand with the console */
if (candidate->hook_timer != NULL)
return;
- thread_add_timer(master, trigger_pathd_candidate_created_timer,
- (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
+ event_add_timer(master, trigger_pathd_candidate_created_timer,
+ (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
}
-void trigger_pathd_candidate_created_timer(struct thread *thread)
+void trigger_pathd_candidate_created_timer(struct event *thread)
{
- struct srte_candidate *candidate = THREAD_ARG(thread);
+ struct srte_candidate *candidate = EVENT_ARG(thread);
candidate->hook_timer = NULL;
hook_call(pathd_candidate_created, candidate);
}
from changing the candidate by hand with the console */
if (candidate->hook_timer != NULL)
return;
- thread_add_timer(master, trigger_pathd_candidate_updated_timer,
- (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
+ event_add_timer(master, trigger_pathd_candidate_updated_timer,
+ (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
}
-void trigger_pathd_candidate_updated_timer(struct thread *thread)
+void trigger_pathd_candidate_updated_timer(struct event *thread)
{
- struct srte_candidate *candidate = THREAD_ARG(thread);
+ struct srte_candidate *candidate = EVENT_ARG(thread);
candidate->hook_timer = NULL;
hook_call(pathd_candidate_updated, candidate);
}
/* The hook needs to be call synchronously, otherwise the candidate
path will be already deleted when the handler is called */
if (candidate->hook_timer != NULL) {
- thread_cancel(&candidate->hook_timer);
+ event_cancel(&candidate->hook_timer);
candidate->hook_timer = NULL;
}
hook_call(pathd_candidate_removed, candidate);
uint32_t affinity_filters[MAX_AFFINITY_FILTER_TYPE];
/* Hooks delaying timer */
- struct thread *hook_timer;
+ struct event *hook_timer;
};
RB_HEAD(srte_candidate_head, srte_candidate);
extern struct zebra_privs_t pathd_privs;
/* master thread, defined in path_main.c */
-extern struct thread_master *master;
+extern struct event_loop *master;
/* pathd.c */
struct srte_segment_list *srte_segment_list_add(const char *name);
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
struct option longopts[] = { { 0 } };
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* SIGHUP handler. */
static void sighup(void)
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "prefix.h"
#include "table.h"
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
char mapname[100];
};
-extern struct thread_master *master;
+extern struct event_loop *master;
extern void pbr_zebra_init(void);
"Source address\n")
{
char xpath[XPATH_MAXLEN];
+ struct ipaddr group_addr = {0};
+
+ (void)str2ipaddr(group_str, &group_addr);
+
+ if (!IN6_IS_ADDR_MULTICAST(&group_addr.ipaddr_v6)) {
+ vty_out(vty, "Invalid Multicast Address\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
if (source_str) {
if (IPV6_ADDR_SAME(&source, &in6addr_any)) {
#include "lib/jhash.h"
#include "lib/prefix.h"
#include "lib/checksum.h"
-#include "lib/thread.h"
+#include "lib/frrevent.h"
#include "termtable.h"
#include "pimd/pim6_mld.h"
DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
-static void gm_t_query(struct thread *t);
+static void gm_t_query(struct event *t);
static void gm_trigger_specific(struct gm_sg *sg);
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
struct timeval expire_wait);
static void gm_sg_free(struct gm_sg *sg)
{
/* t_sg_expiry is handled before this is reached */
- THREAD_OFF(sg->t_sg_query);
+ EVENT_OFF(sg->t_sg_query);
gm_packet_sg_subs_fini(sg->subs_negative);
gm_packet_sg_subs_fini(sg->subs_positive);
XFREE(MTYPE_GM_SG, sg);
gm_expiry_calc(&timers);
gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
- THREAD_OFF(sg->t_sg_query);
+ EVENT_OFF(sg->t_sg_query);
sg->n_query = gm_ifp->cur_lmqc;
sg->query_sbit = false;
gm_trigger_specific(sg);
* another path.
*/
if (has_expired)
- THREAD_OFF(sg->t_sg_expire);
+ EVENT_OFF(sg->t_sg_expire);
assertf((!sg->t_sg_expire &&
!gm_packet_sg_subs_count(sg->subs_positive) &&
{
if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
- THREAD_OFF(sg->t_sg_expire);
+ EVENT_OFF(sg->t_sg_expire);
sg->query_sbit = true;
}
* its own path too and won't hit this. This is really only triggered when a
* host straight up disappears.
*/
-static void gm_t_expire(struct thread *t)
+static void gm_t_expire(struct event *t)
{
- struct gm_if *gm_ifp = THREAD_ARG(t);
+ struct gm_if *gm_ifp = EVENT_ARG(t);
struct gm_packet_state *pkt;
zlog_info(log_ifp("general expiry timer"));
log_ifp("next general expiry in %" PRId64 "ms"),
remain_ms / 1000);
- thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
- &remain, &gm_ifp->t_expire);
+ event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+ &remain, &gm_ifp->t_expire);
return;
}
gm_ifp->n_pending--;
if (!gm_ifp->n_pending)
- THREAD_OFF(gm_ifp->t_expire);
+ EVENT_OFF(gm_ifp->t_expire);
}
/* people might be messing with their configs or something */
zlog_debug(
log_ifp("starting general timer @ 0: %pTVMu"),
&pend->expiry);
- thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
- &timers->expire_wait, &gm_ifp->t_expire);
+ event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+ &timers->expire_wait, &gm_ifp->t_expire);
} else if (PIM_DEBUG_GM_TRACE)
zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
gm_ifp->n_pending, &pend->expiry);
}
-static void gm_t_sg_expire(struct thread *t)
+static void gm_t_sg_expire(struct event *t)
{
- struct gm_sg *sg = THREAD_ARG(t);
+ struct gm_sg *sg = EVENT_ARG(t);
struct gm_if *gm_ifp = sg->iface;
struct gm_packet_sg *item;
if (sg->t_sg_expire) {
struct timeval remain;
- remain = thread_timer_remain(sg->t_sg_expire);
+ remain = event_timer_remain(sg->t_sg_expire);
if (timercmp(&remain, &expire_wait, <=))
return;
- THREAD_OFF(sg->t_sg_expire);
+ EVENT_OFF(sg->t_sg_expire);
}
- thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
- &sg->t_sg_expire);
+ event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
+ &sg->t_sg_expire);
}
static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
}
}
-static void gm_t_grp_expire(struct thread *t)
+static void gm_t_grp_expire(struct event *t)
{
/* if we're here, that means when we received the group-specific query
* there was one or more active S,G for this group. For *,G the timer
* receive a report, so that work is left to gm_t_sg_expire and we
* shouldn't worry about it here.
*/
- struct gm_grp_pending *pend = THREAD_ARG(t);
+ struct gm_grp_pending *pend = EVENT_ARG(t);
struct gm_if *gm_ifp = pend->iface;
struct gm_sg *sg, *sg_start, sg_ref = {};
* parallel. But if we received nothing for the *,G query,
* the S,G query is kinda irrelevant.
*/
- THREAD_OFF(sg->t_sg_expire);
+ EVENT_OFF(sg->t_sg_expire);
frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
/* this will also drop the EXCLUDE S,G lists */
if (pend) {
struct timeval remain;
- remain = thread_timer_remain(pend->t_expire);
+ remain = event_timer_remain(pend->t_expire);
if (timercmp(&remain, &timers->expire_wait, <=))
return;
- THREAD_OFF(pend->t_expire);
+ EVENT_OFF(pend->t_expire);
} else {
pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
pend->grp = grp;
}
monotime(&pend->query);
- thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
- &timers->expire_wait, &pend->t_expire);
+ event_add_timer_tv(router->master, gm_t_grp_expire, pend,
+ &timers->expire_wait, &pend->t_expire);
if (PIM_DEBUG_GM_TRACE)
zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
{
struct pim_interface *pim_ifp = gm_ifp->ifp->info;
- THREAD_OFF(gm_ifp->t_query);
+ EVENT_OFF(gm_ifp->t_query);
if (pim_addr_is_any(pim_ifp->ll_lowest))
return;
gm_ifp->n_startup = gm_ifp->cur_qrv;
- thread_execute(router->master, gm_t_query, gm_ifp, 0);
+ event_execute(router->master, gm_t_query, gm_ifp, 0);
}
-static void gm_t_other_querier(struct thread *t)
+static void gm_t_other_querier(struct event *t)
{
- struct gm_if *gm_ifp = THREAD_ARG(t);
+ struct gm_if *gm_ifp = EVENT_ARG(t);
struct pim_interface *pim_ifp = gm_ifp->ifp->info;
zlog_info(log_ifp("other querier timer expired"));
gm_ifp->querier = pim_ifp->ll_lowest;
gm_ifp->n_startup = gm_ifp->cur_qrv;
- thread_execute(router->master, gm_t_query, gm_ifp, 0);
+ event_execute(router->master, gm_t_query, gm_ifp, 0);
}
static void gm_handle_query(struct gm_if *gm_ifp,
if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
unsigned int other_ms;
- THREAD_OFF(gm_ifp->t_query);
- THREAD_OFF(gm_ifp->t_other_querier);
+ EVENT_OFF(gm_ifp->t_query);
+ EVENT_OFF(gm_ifp->t_other_querier);
other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
- thread_add_timer_msec(router->master, gm_t_other_querier,
- gm_ifp, other_ms,
- &gm_ifp->t_other_querier);
+ event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
+ other_ms, &gm_ifp->t_other_querier);
}
if (len == sizeof(struct mld_v1_pkt)) {
return false;
}
-static void gm_t_recv(struct thread *t)
+static void gm_t_recv(struct event *t)
{
- struct pim_instance *pim = THREAD_ARG(t);
+ struct pim_instance *pim = EVENT_ARG(t);
union {
char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
CMSG_SPACE(256) /* hop options */ +
ssize_t nread;
size_t pktlen;
- thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
- &pim->t_gm_recv);
+ event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+ &pim->t_gm_recv);
iov->iov_base = rxbuf;
iov->iov_len = sizeof(rxbuf);
}
}
-static void gm_t_query(struct thread *t)
+static void gm_t_query(struct event *t)
{
- struct gm_if *gm_ifp = THREAD_ARG(t);
+ struct gm_if *gm_ifp = EVENT_ARG(t);
unsigned int timer_ms = gm_ifp->cur_query_intv;
if (gm_ifp->n_startup) {
gm_ifp->n_startup--;
}
- thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
- &gm_ifp->t_query);
+ event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
+ &gm_ifp->t_query);
gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
}
-static void gm_t_sg_query(struct thread *t)
+static void gm_t_sg_query(struct event *t)
{
- struct gm_sg *sg = THREAD_ARG(t);
+ struct gm_sg *sg = EVENT_ARG(t);
gm_trigger_specific(sg);
}
XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}
-static void gm_t_gsq_pend(struct thread *t)
+static void gm_t_gsq_pend(struct event *t)
{
- struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);
+ struct gm_gsq_pending *pend_gsq = EVENT_ARG(t);
gm_send_specific(pend_gsq);
}
sg->n_query--;
if (sg->n_query)
- thread_add_timer_msec(router->master, gm_t_sg_query, sg,
- gm_ifp->cur_query_intv_trig,
- &sg->t_sg_query);
+ event_add_timer_msec(router->master, gm_t_sg_query, sg,
+ gm_ifp->cur_query_intv_trig,
+ &sg->t_sg_query);
if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
return;
pend_gsq->iface = gm_ifp;
gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
- thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
- &gm_ifp->cfg_timing_fuzz,
- &pend_gsq->t_send);
+ event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
+ &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
}
assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
pend_gsq->n_src++;
if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
- THREAD_OFF(pend_gsq->t_send);
+ EVENT_OFF(pend_gsq->t_send);
gm_send_specific(pend_gsq);
pend_gsq = NULL;
}
vrf->name);
}
- thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
- &pim->t_gm_recv);
+ event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+ &pim->t_gm_recv);
}
static void gm_vrf_socket_decref(struct pim_instance *pim)
if (--pim->gm_socket_if_count)
return;
- THREAD_OFF(pim->t_gm_recv);
+ EVENT_OFF(pim->t_gm_recv);
close(pim->gm_socket);
pim->gm_socket = -1;
}
gm_packet_drop(pkt, false);
while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
- THREAD_OFF(pend_grp->t_expire);
+ EVENT_OFF(pend_grp->t_expire);
XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
}
while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
- THREAD_OFF(pend_gsq->t_send);
+ EVENT_OFF(pend_gsq->t_send);
XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}
while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
- THREAD_OFF(sg->t_sg_expire);
+ EVENT_OFF(sg->t_sg_expire);
assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
&sg->sgaddr);
assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
if (PIM_DEBUG_GM_EVENTS)
zlog_debug(log_ifp("MLD stop"));
- THREAD_OFF(gm_ifp->t_query);
- THREAD_OFF(gm_ifp->t_other_querier);
- THREAD_OFF(gm_ifp->t_expire);
+ EVENT_OFF(gm_ifp->t_query);
+ EVENT_OFF(gm_ifp->t_other_querier);
+ EVENT_OFF(gm_ifp->t_expire);
frr_with_privs (&pimd_privs) {
struct ipv6_mreq mreq;
gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
if (was_querier)
gm_ifp->querier = pim_ifp->ll_lowest;
- THREAD_OFF(gm_ifp->t_query);
+ EVENT_OFF(gm_ifp->t_query);
if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
if (was_querier)
return;
gm_ifp->n_startup = gm_ifp->cur_qrv;
- thread_execute(router->master, gm_t_query, gm_ifp, 0);
+ event_execute(router->master, gm_t_query, gm_ifp, 0);
}
void gm_ifp_update(struct interface *ifp)
#include "typesafe.h"
#include "pim_addr.h"
-struct thread;
+struct event;
struct pim_instance;
struct gm_packet_sg;
struct gm_if;
* (implies we haven't received any report yet, since it's cancelled
* by that)
*/
- struct thread *t_sg_expire;
+ struct event *t_sg_expire;
/* last-member-left triggered queries (group/group-source specific)
*
* this timer will be running even if we aren't the elected querier,
* in case the election result changes midway through.
*/
- struct thread *t_sg_query;
+ struct event *t_sg_query;
/* we must keep sending (QRV) queries even if we get a positive
* response, to make sure other routers are updated. query_sbit
pim_addr grp;
struct timeval query;
- struct thread *t_expire;
+ struct event *t_expire;
};
/* guaranteed MTU for IPv6 is 1280 bytes. IPv6 header is 40 bytes, MLDv2
struct gm_gsq_pends_item itm;
struct gm_if *iface;
- struct thread *t_send;
+ struct event *t_send;
pim_addr grp;
bool s_bit;
struct gm_if {
struct interface *ifp;
struct pim_instance *pim;
- struct thread *t_query, *t_other_querier, *t_expire;
+ struct event *t_query, *t_other_querier, *t_expire;
bool stopping;
return pim_assert_do(ch, metric);
}
-static void on_assert_timer(struct thread *t)
+static void on_assert_timer(struct event *t)
{
struct pim_ifchannel *ch;
struct interface *ifp;
- ch = THREAD_ARG(t);
+ ch = EVENT_ARG(t);
ifp = ch->interface;
__func__, ch->sg_str, ch->interface->name);
}
}
- THREAD_OFF(ch->t_ifassert_timer);
+ EVENT_OFF(ch->t_ifassert_timer);
}
static void pim_assert_timer_set(struct pim_ifchannel *ch, int interval)
__func__, ch->sg_str, interval, ch->interface->name);
}
- thread_add_timer(router->master, on_assert_timer, ch, interval,
- &ch->t_ifassert_timer);
+ event_add_timer(router->master, on_assert_timer, ch, interval,
+ &ch->t_ifassert_timer);
}
static void pim_assert_timer_reset(struct pim_ifchannel *ch)
static void pim_bsm_rpinfo_free(struct bsm_rpinfo *bsrp_info)
{
- THREAD_OFF(bsrp_info->g2rp_timer);
+ EVENT_OFF(bsrp_info->g2rp_timer);
XFREE(MTYPE_PIM_BSRP_INFO, bsrp_info);
}
return bsgrp;
}
-static void pim_on_bs_timer(struct thread *t)
+static void pim_on_bs_timer(struct event *t)
{
struct route_node *rn;
struct bsm_scope *scope;
struct bsgrp_node *bsgrp_node;
struct bsm_rpinfo *bsrp;
- scope = THREAD_ARG(t);
- THREAD_OFF(scope->bs_timer);
+ scope = EVENT_ARG(t);
+ EVENT_OFF(scope->bs_timer);
if (PIM_DEBUG_BSM)
zlog_debug("%s: Bootstrap Timer expired for scope: %d",
if (PIM_DEBUG_BSM)
zlog_debug("%s : BS timer being stopped of sz: %d", __func__,
scope->sz_id);
- THREAD_OFF(scope->bs_timer);
+ EVENT_OFF(scope->bs_timer);
}
static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
zlog_debug("%s : Invalid scope(NULL).", __func__);
return;
}
- THREAD_OFF(scope->bs_timer);
+ EVENT_OFF(scope->bs_timer);
if (PIM_DEBUG_BSM)
zlog_debug(
"%s : starting bs timer for scope %d with timeout %d secs",
__func__, scope->sz_id, bs_timeout);
- thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
- &scope->bs_timer);
+ event_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+ &scope->bs_timer);
}
static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
return true;
}
-static void pim_on_g2rp_timer(struct thread *t)
+static void pim_on_g2rp_timer(struct event *t)
{
struct bsm_rpinfo *bsrp;
struct bsm_rpinfo *bsrp_node;
uint16_t elapse;
pim_addr bsrp_addr;
- bsrp = THREAD_ARG(t);
- THREAD_OFF(bsrp->g2rp_timer);
+ bsrp = EVENT_ARG(t);
+ EVENT_OFF(bsrp->g2rp_timer);
bsgrp_node = bsrp->bsgrp_node;
/* elapse time is the hold time of expired node */
zlog_debug("%s : Invalid brsp(NULL).", __func__);
return;
}
- THREAD_OFF(bsrp->g2rp_timer);
+ EVENT_OFF(bsrp->g2rp_timer);
if (PIM_DEBUG_BSM)
zlog_debug(
"%s : starting g2rp timer for grp: %pFX - rp: %pPAs with timeout %d secs(Actual Hold time : %d secs)",
__func__, &bsrp->bsgrp_node->group, &bsrp->rp_address,
hold_time, bsrp->rp_holdtime);
- thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
- &bsrp->g2rp_timer);
+ event_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+ &bsrp->g2rp_timer);
}
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
__func__, &bsrp->bsgrp_node->group,
&bsrp->rp_address);
- THREAD_OFF(bsrp->g2rp_timer);
+ EVENT_OFF(bsrp->g2rp_timer);
}
static bool is_hold_time_zero(void *data)
struct bsm_frags_head bsm_frags[1];
struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
- struct thread *bs_timer; /* Boot strap timer */
+ struct event *bs_timer; /* Boot strap timer */
};
/* BSM packet (= fragment) - this is stored as list in bsm_frags inside scope
uint16_t rp_holdtime; /* RP holdtime - g2rp timer value */
pim_addr rp_address; /* RP Address */
struct bsgrp_node *bsgrp_node; /* Back ptr to bsgrp_node */
- struct thread *g2rp_timer; /* Run only for elected RP node */
+ struct event *g2rp_timer; /* Run only for elected RP node */
};
extern int pim_bsm_rpinfo_cmp(const struct bsm_rpinfo *a,
struct gm_if *mld;
int pim_sock_fd; /* PIM socket file descriptor */
- struct thread *t_pim_sock_read; /* thread for reading PIM socket */
+ struct event *t_pim_sock_read; /* thread for reading PIM socket */
int64_t pim_sock_creation; /* timestamp of PIM socket creation */
- struct thread *t_pim_hello_timer;
+ struct event *t_pim_hello_timer;
int pim_hello_period;
int pim_default_holdtime;
int pim_triggered_hello_delay;
#include <zebra.h>
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "if.h"
#include "vrf.h"
ch->upstream = NULL;
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
- THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
- THREAD_OFF(ch->t_ifassert_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifassert_timer);
if (ch->parent) {
listnode_delete(ch->parent->sources, ch);
*/
void reset_ifassert_state(struct pim_ifchannel *ch)
{
- THREAD_OFF(ch->t_ifassert_timer);
+ EVENT_OFF(ch->t_ifassert_timer);
pim_ifassert_winner_set(ch, PIM_IFASSERT_NOINFO, PIMADDR_ANY,
router->infinite_assert_metric);
delete_on_noinfo(ch);
}
-static void on_ifjoin_expiry_timer(struct thread *t)
+static void on_ifjoin_expiry_timer(struct event *t)
{
struct pim_ifchannel *ch;
- ch = THREAD_ARG(t);
+ ch = EVENT_ARG(t);
if (PIM_DEBUG_PIM_TRACE)
zlog_debug("%s: ifchannel %s expiry timer", __func__,
/* ch may have been deleted */
}
-static void on_ifjoin_prune_pending_timer(struct thread *t)
+static void on_ifjoin_prune_pending_timer(struct event *t)
{
struct pim_ifchannel *ch;
int send_prune_echo; /* boolean */
struct interface *ifp;
struct pim_interface *pim_ifp;
- ch = THREAD_ARG(t);
+ ch = EVENT_ARG(t);
if (PIM_DEBUG_PIM_TRACE)
zlog_debug("%s: IFCHANNEL%pSG %s Prune Pending Timer Popped",
previously received join message with holdtime=0xFFFF.
*/
if (ch->t_ifjoin_expiry_timer) {
- unsigned long remain = thread_timer_remain_second(
+ unsigned long remain = event_timer_remain_second(
ch->t_ifjoin_expiry_timer);
if (remain > holdtime) {
/*
return;
}
}
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
break;
case PIM_IFJOIN_PRUNE:
if (source_flags & PIM_ENCODE_RPT_BIT) {
pim_ifchannel_ifjoin_switch(__func__, ch,
PIM_IFJOIN_NOINFO);
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
delete_on_noinfo(ch);
return;
} else
* maximum of its current value and the HoldTime from the
* triggering Join/Prune message.
*/
- THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
/* Check if SGRpt join Received */
if ((source_flags & PIM_ENCODE_RPT_BIT) &&
* I transitions to the NoInfo state.The ET and PPT are
* cancelled.
*/
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
pim_ifchannel_ifjoin_switch(__func__, ch,
PIM_IFJOIN_NOINFO);
return;
pim_ifchannel_ifjoin_handler(ch, pim_ifp);
if (ch->t_ifjoin_expiry_timer) {
- unsigned long remain = thread_timer_remain_second(
+ unsigned long remain = event_timer_remain_second(
ch->t_ifjoin_expiry_timer);
if (remain > holdtime)
return;
}
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
break;
case PIM_IFJOIN_PRUNE_TMP:
}
if (holdtime != 0xFFFF) {
- thread_add_timer(router->master, on_ifjoin_expiry_timer, ch,
- holdtime, &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer, ch,
+ holdtime, &ch->t_ifjoin_expiry_timer);
}
}
be taken not to use "ch" afterwards since it would be
deleted. */
- THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer_msec(
- router->master, on_ifjoin_prune_pending_timer,
- ch, jp_override_interval_msec,
- &ch->t_ifjoin_prune_pending_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
+ event_add_timer_msec(router->master,
+ on_ifjoin_prune_pending_timer, ch,
+ jp_override_interval_msec,
+ &ch->t_ifjoin_prune_pending_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
pim_upstream_update_join_desired(pim_ifp->pim,
ch->upstream);
}
/* If we called ifjoin_prune() directly instead, care should
be taken not to use "ch" afterwards since it would be
deleted. */
- THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
- thread_add_timer_msec(router->master,
- on_ifjoin_prune_pending_timer, ch,
- jp_override_interval_msec,
- &ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
+ event_add_timer_msec(router->master,
+ on_ifjoin_prune_pending_timer, ch,
+ jp_override_interval_msec,
+ &ch->t_ifjoin_prune_pending_timer);
break;
case PIM_IFJOIN_PRUNE:
if (source_flags & PIM_ENCODE_RPT_BIT) {
- THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(ch->t_ifjoin_prune_pending_timer);
/*
* While in Prune State, Receive SGRpt Prune.
* RFC 7761 Sec 4.5.3:
* Join/Prune message.
*/
if (ch->t_ifjoin_expiry_timer) {
- unsigned long rem = thread_timer_remain_second(
+ unsigned long rem = event_timer_remain_second(
ch->t_ifjoin_expiry_timer);
if (rem > holdtime)
return;
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
}
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
case PIM_IFJOIN_PRUNE_TMP:
if (source_flags & PIM_ENCODE_RPT_BIT) {
ch->ifjoin_state = PIM_IFJOIN_PRUNE;
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
case PIM_IFJOIN_PRUNE_PENDING_TMP:
if (source_flags & PIM_ENCODE_RPT_BIT) {
ch->ifjoin_state = PIM_IFJOIN_PRUNE_PENDING;
- THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ EVENT_OFF(ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
}
break;
if (child->ifjoin_state == PIM_IFJOIN_PRUNE_PENDING_TMP)
- THREAD_OFF(child->t_ifjoin_prune_pending_timer);
- THREAD_OFF(child->t_ifjoin_expiry_timer);
+ EVENT_OFF(child->t_ifjoin_prune_pending_timer);
+ EVENT_OFF(child->t_ifjoin_expiry_timer);
PIM_IF_FLAG_UNSET_S_G_RPT(child->flags);
child->ifjoin_state = PIM_IFJOIN_NOINFO;
/* Per-interface (S,G) Join/Prune State (Section 4.1.4 of RFC4601) */
enum pim_ifjoin_state ifjoin_state;
- struct thread *t_ifjoin_expiry_timer;
- struct thread *t_ifjoin_prune_pending_timer;
+ struct event *t_ifjoin_expiry_timer;
+ struct event *t_ifjoin_prune_pending_timer;
int64_t ifjoin_creation; /* Record uptime of ifjoin state */
/* Per-interface (S,G) Assert State (Section 4.6.1 of RFC4601) */
enum pim_ifassert_state ifassert_state;
- struct thread *t_ifassert_timer;
+ struct event *t_ifassert_timer;
pim_addr ifassert_winner;
struct pim_assert_metric ifassert_winner_metric;
int64_t ifassert_creation; /* Record uptime of ifassert state */
#include "pim_tib.h"
static void group_timer_off(struct gm_group *group);
-static void pim_igmp_general_query(struct thread *t);
+static void pim_igmp_general_query(struct event *t);
void igmp_anysource_forward_start(struct pim_instance *pim,
struct gm_group *group)
return NULL;
}
-static void pim_igmp_other_querier_expire(struct thread *t)
+static void pim_igmp_other_querier_expire(struct event *t)
{
struct gm_sock *igmp;
- igmp = THREAD_ARG(t);
+ igmp = EVENT_ARG(t);
assert(!igmp->t_igmp_query_timer);
"Querier %s resetting TIMER event for Other-Querier-Present",
ifaddr_str);
}
- THREAD_OFF(igmp->t_other_querier_timer);
+ EVENT_OFF(igmp->t_other_querier_timer);
} else {
/*
We are the current querier, then stop sending general queries:
other_querier_present_interval_msec % 1000);
}
- thread_add_timer_msec(router->master, pim_igmp_other_querier_expire,
- igmp, other_querier_present_interval_msec,
- &igmp->t_other_querier_timer);
+ event_add_timer_msec(router->master, pim_igmp_other_querier_expire,
+ igmp, other_querier_present_interval_msec,
+ &igmp->t_other_querier_timer);
}
void pim_igmp_other_querier_timer_off(struct gm_sock *igmp)
ifaddr_str, igmp->fd, igmp->interface->name);
}
}
- THREAD_OFF(igmp->t_other_querier_timer);
+ EVENT_OFF(igmp->t_other_querier_timer);
}
int igmp_validate_checksum(char *igmp_msg, int igmp_msg_len)
ifaddr_str, query_interval,
startup_mode ? "startup" : "non-startup", igmp->fd);
}
- thread_add_timer(router->master, pim_igmp_general_query, igmp,
- query_interval, &igmp->t_igmp_query_timer);
+ event_add_timer(router->master, pim_igmp_general_query, igmp,
+ query_interval, &igmp->t_igmp_query_timer);
}
void pim_igmp_general_query_off(struct gm_sock *igmp)
ifaddr_str, igmp->fd, igmp->interface->name);
}
}
- THREAD_OFF(igmp->t_igmp_query_timer);
+ EVENT_OFF(igmp->t_igmp_query_timer);
}
/* Issue IGMP general query */
-static void pim_igmp_general_query(struct thread *t)
+static void pim_igmp_general_query(struct event *t)
{
struct gm_sock *igmp;
struct in_addr dst_addr;
struct pim_interface *pim_ifp;
int query_buf_size;
- igmp = THREAD_ARG(t);
+ igmp = EVENT_ARG(t);
assert(igmp->interface);
assert(igmp->interface->info);
igmp->interface->name);
}
}
- THREAD_OFF(igmp->t_igmp_read);
+ EVENT_OFF(igmp->t_igmp_read);
if (close(igmp->fd)) {
flog_err(
igmp_source_delete(src);
}
- THREAD_OFF(group->t_group_query_retransmit_timer);
+ EVENT_OFF(group->t_group_query_retransmit_timer);
group_timer_off(group);
igmp_group_count_decr(pim_ifp);
static void igmp_read_on(struct gm_sock *igmp);
-static void pim_igmp_read(struct thread *t)
+static void pim_igmp_read(struct event *t)
{
uint8_t buf[10000];
- struct gm_sock *igmp = (struct gm_sock *)THREAD_ARG(t);
+ struct gm_sock *igmp = (struct gm_sock *)EVENT_ARG(t);
struct sockaddr_storage from;
struct sockaddr_storage to;
socklen_t fromlen = sizeof(from);
zlog_debug("Scheduling READ event on IGMP socket fd=%d",
igmp->fd);
}
- thread_add_read(router->master, pim_igmp_read, igmp, igmp->fd,
- &igmp->t_igmp_read);
+ event_add_read(router->master, pim_igmp_read, igmp, igmp->fd,
+ &igmp->t_igmp_read);
}
struct gm_sock *pim_igmp_sock_add(struct list *igmp_sock_list,
source records. Source records whose timers are zero (from the
previous EXCLUDE mode) are deleted.
*/
-static void igmp_group_timer(struct thread *t)
+static void igmp_group_timer(struct event *t)
{
struct gm_group *group;
- group = THREAD_ARG(t);
+ group = EVENT_ARG(t);
if (PIM_DEBUG_GM_TRACE) {
char group_str[INET_ADDRSTRLEN];
zlog_debug("Cancelling TIMER event for group %s on %s",
group_str, group->interface->name);
}
- THREAD_OFF(group->t_group_timer);
+ EVENT_OFF(group->t_group_timer);
}
void igmp_group_timer_on(struct gm_group *group, long interval_msec,
*/
assert(group->group_filtermode_isexcl);
- thread_add_timer_msec(router->master, igmp_group_timer, group,
- interval_msec, &group->t_group_timer);
+ event_add_timer_msec(router->master, igmp_group_timer, group,
+ interval_msec, &group->t_group_timer);
}
struct gm_group *find_group_by_addr(struct gm_sock *igmp,
pim_addr ifaddr;
time_t sock_creation;
- struct thread *t_igmp_read; /* read: IGMP sockets */
- struct thread
- *t_igmp_query_timer; /* timer: issue IGMP general queries */
- struct thread *t_other_querier_timer; /* timer: other querier present */
+ struct event *t_igmp_read; /* read: IGMP sockets */
+ /* timer: issue IGMP general queries */
+ struct event *t_igmp_query_timer;
+ struct event *t_other_querier_timer; /* timer: other querier present */
pim_addr querier_addr; /* IP address of the querier */
int querier_query_interval; /* QQI */
int querier_robustness_variable; /* QRV */
struct gm_source {
pim_addr source_addr;
- struct thread *t_source_timer;
+ struct event *t_source_timer;
struct gm_group *source_group; /* back pointer */
time_t source_creation;
uint32_t source_flags;
represents the time for the *filter-mode* of the group to expire and
switch to INCLUDE mode.
*/
- struct thread *t_group_timer;
+ struct event *t_group_timer;
/* Shared between group-specific and
group-and-source-specific retransmissions */
- struct thread *t_group_query_retransmit_timer;
+ struct event *t_group_query_retransmit_timer;
/* Counter exclusive for group-specific retransmissions
(not used by group-and-source-specific retransmissions,
igmp_group_timer_on(group, group_membership_interval_msec, ifp->name);
}
-static void igmp_source_timer(struct thread *t)
+static void igmp_source_timer(struct event *t)
{
struct gm_source *source;
struct gm_group *group;
- source = THREAD_ARG(t);
+ source = EVENT_ARG(t);
group = source->source_group;
group_str, source_str, group->interface->name);
}
- THREAD_OFF(source->t_source_timer);
+ EVENT_OFF(source->t_source_timer);
}
static void igmp_source_timer_on(struct gm_group *group,
source_str, group->interface->name);
}
- thread_add_timer_msec(router->master, igmp_source_timer, source,
- interval_msec, &source->t_source_timer);
+ event_add_timer_msec(router->master, igmp_source_timer, source,
+ interval_msec, &source->t_source_timer);
/*
RFC 3376: 6.3. IGMPv3 Source-Specific Forwarding Rules
return num_retransmit_sources_left;
}
-static void igmp_group_retransmit(struct thread *t)
+static void igmp_group_retransmit(struct event *t)
{
struct gm_group *group;
int num_retransmit_sources_left;
int send_with_sflag_set; /* boolean */
- group = THREAD_ARG(t);
+ group = EVENT_ARG(t);
if (PIM_DEBUG_GM_TRACE) {
char group_str[INET_ADDRSTRLEN];
group->interface->name);
}
- thread_add_timer_msec(router->master, igmp_group_retransmit, group,
- lmqi_msec,
- &group->t_group_query_retransmit_timer);
+ event_add_timer_msec(router->master, igmp_group_retransmit, group,
+ lmqi_msec, &group->t_group_query_retransmit_timer);
}
static long igmp_group_timer_remain_msec(struct gm_group *group)
pim_bsm_proc_free(pim);
/* Traverse and cleanup rpf_hash */
- if (pim->rpf_hash) {
- hash_clean(pim->rpf_hash, (void *)pim_rp_list_hash_clean);
- hash_free(pim->rpf_hash);
- pim->rpf_hash = NULL;
- }
+ hash_clean_and_free(&pim->rpf_hash, (void *)pim_rp_list_hash_clean);
pim_if_terminate(pim);
};
struct pim_router {
- struct thread_master *master;
+ struct event_loop *master;
uint32_t debugs;
/* Holds the client data(unencoded) that need to be pushed to MCLAGD*/
struct stream_fifo *mlag_fifo;
struct stream *mlag_stream;
- struct thread *zpthread_mlag_write;
+ struct event *zpthread_mlag_write;
struct in_addr anycast_vtep_ip;
struct in_addr local_vtep_ip;
struct pim_mlag_stats mlag_stats;
int send_v6_secondary;
- struct thread *thread;
+ struct event *thread;
int mroute_socket;
int reg_sock; /* Socket to send register msg */
int64_t mroute_socket_creation;
unsigned int gm_socket_if_count;
int gm_socket;
- struct thread *t_gm_recv;
+ struct event *t_gm_recv;
unsigned int gm_group_count;
unsigned int gm_watermark_limit;
uint64_t bsm_dropped;
/* If we need to rescan all our upstreams */
- struct thread *rpf_cache_refresher;
+ struct event *rpf_cache_refresher;
int64_t rpf_cache_refresh_requests;
int64_t rpf_cache_refresh_events;
int64_t rpf_cache_refresh_last;
if (PIM_IF_FLAG_TEST_S_G_RPT(child->flags)) {
if (child->ifjoin_state
== PIM_IFJOIN_PRUNE_PENDING_TMP)
- THREAD_OFF(
+ EVENT_OFF(
child->t_ifjoin_prune_pending_timer);
- THREAD_OFF(
- child->t_ifjoin_expiry_timer);
+ EVENT_OFF(child->t_ifjoin_expiry_timer);
PIM_IF_FLAG_UNSET_S_G_RPT(child->flags);
child->ifjoin_state = PIM_IFJOIN_NOINFO;
delete_on_noinfo(child);
#include "lib/version.h"
#include <getopt.h>
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include <signal.h>
#include "memory.h"
return 0;
}
-static void pim_mlag_register_handler(struct thread *thread)
+static void pim_mlag_register_handler(struct event *thread)
{
uint32_t bit_mask = 0;
router->mlag_process_register = true;
- thread_add_event(router->master, pim_mlag_register_handler, NULL, 0,
- NULL);
+ event_add_event(router->master, pim_mlag_register_handler, NULL, 0,
+ NULL);
}
-static void pim_mlag_deregister_handler(struct thread *thread)
+static void pim_mlag_deregister_handler(struct event *thread)
{
if (!zclient)
return;
router->mlag_process_register = false;
- thread_add_event(router->master, pim_mlag_deregister_handler, NULL, 0,
- NULL);
+ event_add_event(router->master, pim_mlag_deregister_handler, NULL, 0,
+ NULL);
}
void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp)
return 0;
}
-static void mroute_read(struct thread *t)
+static void mroute_read(struct event *t)
{
struct pim_instance *pim;
static long long count;
int cont = 1;
int rd;
ifindex_t ifindex;
- pim = THREAD_ARG(t);
+ pim = EVENT_ARG(t);
while (cont) {
rd = pim_socket_recvfromto(pim->mroute_socket, (uint8_t *)buf,
static void mroute_read_on(struct pim_instance *pim)
{
- thread_add_read(router->master, mroute_read, pim, pim->mroute_socket,
- &pim->thread);
+ event_add_read(router->master, mroute_read, pim, pim->mroute_socket,
+ &pim->thread);
}
static void mroute_read_off(struct pim_instance *pim)
{
- THREAD_OFF(pim->thread);
+ EVENT_OFF(pim->thread);
}
int pim_mroute_socket_enable(struct pim_instance *pim)
#include <lib/prefix.h>
#include <lib/sockunion.h>
#include <lib/stream.h>
-#include <lib/thread.h>
+#include <frrevent.h>
#include <lib/vty.h>
#include <lib/plist.h>
#include <lib/lib_errors.h>
}
/* RFC-3618:Sec-5.1 - global active source advertisement timer */
-static void pim_msdp_sa_adv_timer_cb(struct thread *t)
+static void pim_msdp_sa_adv_timer_cb(struct event *t)
{
- struct pim_instance *pim = THREAD_ARG(t);
+ struct pim_instance *pim = EVENT_ARG(t);
if (PIM_DEBUG_MSDP_EVENTS) {
zlog_debug("MSDP SA advertisement timer expired");
static void pim_msdp_sa_adv_timer_setup(struct pim_instance *pim, bool start)
{
- THREAD_OFF(pim->msdp.sa_adv_timer);
+ EVENT_OFF(pim->msdp.sa_adv_timer);
if (start) {
- thread_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb,
- pim, PIM_MSDP_SA_ADVERTISMENT_TIME,
- &pim->msdp.sa_adv_timer);
+ event_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb, pim,
+ PIM_MSDP_SA_ADVERTISMENT_TIME,
+ &pim->msdp.sa_adv_timer);
}
}
/* RFC-3618:Sec-5.3 - SA cache state timer */
-static void pim_msdp_sa_state_timer_cb(struct thread *t)
+static void pim_msdp_sa_state_timer_cb(struct event *t)
{
struct pim_msdp_sa *sa;
- sa = THREAD_ARG(t);
+ sa = EVENT_ARG(t);
if (PIM_DEBUG_MSDP_EVENTS) {
pim_msdp_sa_timer_expiry_log(sa, "state");
static void pim_msdp_sa_state_timer_setup(struct pim_msdp_sa *sa, bool start)
{
- THREAD_OFF(sa->sa_state_timer);
+ EVENT_OFF(sa->sa_state_timer);
if (start) {
- thread_add_timer(sa->pim->msdp.master,
- pim_msdp_sa_state_timer_cb, sa,
- PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
+ event_add_timer(sa->pim->msdp.master,
+ pim_msdp_sa_state_timer_cb, sa,
+ PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
}
}
}
/* RFC-3618:Sec-5.4 - peer hold timer */
-static void pim_msdp_peer_hold_timer_cb(struct thread *t)
+static void pim_msdp_peer_hold_timer_cb(struct event *t)
{
struct pim_msdp_peer *mp;
- mp = THREAD_ARG(t);
+ mp = EVENT_ARG(t);
if (PIM_DEBUG_MSDP_EVENTS) {
pim_msdp_peer_timer_expiry_log(mp, "hold");
static void pim_msdp_peer_hold_timer_setup(struct pim_msdp_peer *mp, bool start)
{
struct pim_instance *pim = mp->pim;
- THREAD_OFF(mp->hold_timer);
+ EVENT_OFF(mp->hold_timer);
if (start) {
- thread_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
- mp, pim->msdp.hold_time, &mp->hold_timer);
+ event_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
+ mp, pim->msdp.hold_time, &mp->hold_timer);
}
}
/* RFC-3618:Sec-5.5 - peer keepalive timer */
-static void pim_msdp_peer_ka_timer_cb(struct thread *t)
+static void pim_msdp_peer_ka_timer_cb(struct event *t)
{
struct pim_msdp_peer *mp;
- mp = THREAD_ARG(t);
+ mp = EVENT_ARG(t);
if (PIM_DEBUG_MSDP_EVENTS) {
pim_msdp_peer_timer_expiry_log(mp, "ka");
static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start)
{
- THREAD_OFF(mp->ka_timer);
+ EVENT_OFF(mp->ka_timer);
if (start) {
- thread_add_timer(mp->pim->msdp.master,
- pim_msdp_peer_ka_timer_cb, mp,
- mp->pim->msdp.keep_alive, &mp->ka_timer);
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_ka_timer_cb,
+ mp, mp->pim->msdp.keep_alive, &mp->ka_timer);
}
}
}
/* RFC-3618:Sec-5.6 - connection retry on active peer */
-static void pim_msdp_peer_cr_timer_cb(struct thread *t)
+static void pim_msdp_peer_cr_timer_cb(struct event *t)
{
struct pim_msdp_peer *mp;
- mp = THREAD_ARG(t);
+ mp = EVENT_ARG(t);
if (PIM_DEBUG_MSDP_EVENTS) {
pim_msdp_peer_timer_expiry_log(mp, "connect-retry");
static void pim_msdp_peer_cr_timer_setup(struct pim_msdp_peer *mp, bool start)
{
- THREAD_OFF(mp->cr_timer);
+ EVENT_OFF(mp->cr_timer);
if (start) {
- thread_add_timer(mp->pim->msdp.master,
- pim_msdp_peer_cr_timer_cb, mp,
- mp->pim->msdp.connection_retry, &mp->cr_timer);
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_cr_timer_cb,
+ mp, mp->pim->msdp.connection_retry,
+ &mp->cr_timer);
}
}
}
/* MSDP init */
-void pim_msdp_init(struct pim_instance *pim, struct thread_master *master)
+void pim_msdp_init(struct pim_instance *pim, struct event_loop *master)
{
pim->msdp.master = master;
char hash_name[64];
while ((mg = SLIST_FIRST(&pim->msdp.mglist)) != NULL)
pim_msdp_mg_free(pim, &mg);
- if (pim->msdp.peer_hash) {
- hash_clean(pim->msdp.peer_hash, NULL);
- hash_free(pim->msdp.peer_hash);
- pim->msdp.peer_hash = NULL;
- }
+ hash_clean_and_free(&pim->msdp.peer_hash, NULL);
if (pim->msdp.peer_list) {
list_delete(&pim->msdp.peer_list);
}
- if (pim->msdp.sa_hash) {
- hash_clean(pim->msdp.sa_hash, NULL);
- hash_free(pim->msdp.sa_hash);
- pim->msdp.sa_hash = NULL;
- }
+ hash_clean_and_free(&pim->msdp.sa_hash, NULL);
if (pim->msdp.sa_list) {
list_delete(&pim->msdp.sa_list);
/* rfc-3618 is missing default value for SA-hold-down-Period. pulled
* this number from industry-standards */
#define PIM_MSDP_SA_HOLD_TIME ((3*60)+30)
- struct thread *sa_state_timer; // 5.6
+	struct event *sa_state_timer; // RFC-3618 Sec-5.3 (SA cache state timer)
int64_t uptime;
struct pim_upstream *up;
/* protocol timers */
#define PIM_MSDP_PEER_HOLD_TIME 75
- struct thread *hold_timer; // 5.4
+ struct event *hold_timer; // 5.4
#define PIM_MSDP_PEER_KA_TIME 60
- struct thread *ka_timer; // 5.5
+ struct event *ka_timer; // 5.5
#define PIM_MSDP_PEER_CONNECT_RETRY_TIME 30
- struct thread *cr_timer; // 5.6
+ struct event *cr_timer; // 5.6
/* packet thread and buffers */
uint32_t packet_size;
struct stream *ibuf;
struct stream_fifo *obuf;
- struct thread *t_read;
- struct thread *t_write;
+ struct event *t_read;
+ struct event *t_write;
/* stats */
uint32_t conn_attempts;
struct pim_msdp_listener {
int fd;
union sockunion su;
- struct thread *thread;
+ struct event *thread;
};
struct pim_msdp {
enum pim_msdp_flags flags;
- struct thread_master *master;
+ struct event_loop *master;
struct pim_msdp_listener listener;
uint32_t rejected_accepts;
/* MSDP active-source info */
#define PIM_MSDP_SA_ADVERTISMENT_TIME 60
- struct thread *sa_adv_timer; // 5.6
+	struct event *sa_adv_timer; // RFC-3618 Sec-5.1 (SA advertisement timer)
struct hash *sa_hash;
struct list *sa_list;
uint32_t local_cnt;
};
#define PIM_MSDP_PEER_READ_ON(mp) \
- thread_add_read(mp->pim->msdp.master, pim_msdp_read, mp, mp->fd, \
- &mp->t_read)
+ event_add_read(mp->pim->msdp.master, pim_msdp_read, mp, mp->fd, \
+ &mp->t_read)
#define PIM_MSDP_PEER_WRITE_ON(mp) \
- thread_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
- &mp->t_write)
+ event_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
+ &mp->t_write)
-#define PIM_MSDP_PEER_READ_OFF(mp) thread_cancel(&mp->t_read)
-#define PIM_MSDP_PEER_WRITE_OFF(mp) thread_cancel(&mp->t_write)
+#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
+#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
#if PIM_IPV != 6
// struct pim_msdp *msdp;
struct pim_instance;
-void pim_msdp_init(struct pim_instance *pim, struct thread_master *master);
+void pim_msdp_init(struct pim_instance *pim, struct event_loop *master);
void pim_msdp_exit(struct pim_instance *pim);
char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf,
int buf_size);
void pim_msdp_peer_pkt_rxed(struct pim_msdp_peer *mp);
void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state);
void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str);
-void pim_msdp_write(struct thread *thread);
+void pim_msdp_write(struct event *thread);
int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty,
const char *spaces);
bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim,
#else /* PIM_IPV == 6 */
static inline void pim_msdp_init(struct pim_instance *pim,
- struct thread_master *master)
+ struct event_loop *master)
{
}
#include <lib/log.h>
#include <lib/network.h>
#include <lib/stream.h>
-#include <lib/thread.h>
+#include "frrevent.h"
#include <lib/vty.h>
#include <lib/lib_errors.h>
}
}
-void pim_msdp_write(struct thread *thread)
+void pim_msdp_write(struct event *thread)
{
struct pim_msdp_peer *mp;
struct stream *s;
int work_cnt = 0;
int work_max_cnt = 100;
- mp = THREAD_ARG(thread);
+ mp = EVENT_ARG(thread);
mp->t_write = NULL;
if (PIM_DEBUG_MSDP_INTERNAL) {
return 0;
}
-void pim_msdp_read(struct thread *thread)
+void pim_msdp_read(struct event *thread)
{
struct pim_msdp_peer *mp;
int rc;
uint32_t len;
- mp = THREAD_ARG(thread);
+ mp = EVENT_ARG(thread);
mp->t_read = NULL;
if (PIM_DEBUG_MSDP_INTERNAL) {
#define PIM_MSDP_PKT_TYPE_STRLEN 16
void pim_msdp_pkt_ka_tx(struct pim_msdp_peer *mp);
-void pim_msdp_read(struct thread *thread);
+void pim_msdp_read(struct event *thread);
void pim_msdp_pkt_sa_tx(struct pim_instance *pim);
void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa);
void pim_msdp_pkt_sa_tx_to_one_peer(struct pim_msdp_peer *mp);
#include <lib/log.h>
#include <lib/network.h>
#include <lib/sockunion.h>
-#include <lib/thread.h>
+#include "frrevent.h"
#include <lib/vty.h>
#include <lib/if.h>
#include <lib/vrf.h>
}
/* passive peer socket accept */
-static void pim_msdp_sock_accept(struct thread *thread)
+static void pim_msdp_sock_accept(struct event *thread)
{
union sockunion su;
- struct pim_instance *pim = THREAD_ARG(thread);
+ struct pim_instance *pim = EVENT_ARG(thread);
int accept_sock;
int msdp_sock;
struct pim_msdp_peer *mp;
sockunion_init(&su);
/* re-register accept thread */
- accept_sock = THREAD_FD(thread);
+ accept_sock = EVENT_FD(thread);
if (accept_sock < 0) {
flog_err(EC_LIB_DEVELOPMENT, "accept_sock is negative value %d",
accept_sock);
return;
}
pim->msdp.listener.thread = NULL;
- thread_add_read(router->master, pim_msdp_sock_accept, pim, accept_sock,
- &pim->msdp.listener.thread);
+ event_add_read(router->master, pim_msdp_sock_accept, pim, accept_sock,
+ &pim->msdp.listener.thread);
/* accept client connection. */
msdp_sock = sockunion_accept(accept_sock, &su);
/* add accept thread */
listener->fd = sock;
memcpy(&listener->su, &sin, socklen);
- thread_add_read(pim->msdp.master, pim_msdp_sock_accept, pim, sock,
- &listener->thread);
+ event_add_read(pim->msdp.master, pim_msdp_sock_accept, pim, sock,
+ &listener->thread);
pim->msdp.flags |= PIM_MSDPF_LISTENER;
return 0;
}
}
-static void on_neighbor_timer(struct thread *t)
+static void on_neighbor_timer(struct event *t)
{
struct pim_neighbor *neigh;
struct interface *ifp;
char msg[100];
- neigh = THREAD_ARG(t);
+ neigh = EVENT_ARG(t);
ifp = neigh->interface;
{
neigh->holdtime = holdtime;
- THREAD_OFF(neigh->t_expire_timer);
+ EVENT_OFF(neigh->t_expire_timer);
/*
0xFFFF is request for no holdtime
__func__, neigh->holdtime, &neigh->source_addr,
neigh->interface->name);
- thread_add_timer(router->master, on_neighbor_timer, neigh,
- neigh->holdtime, &neigh->t_expire_timer);
+ event_add_timer(router->master, on_neighbor_timer, neigh,
+ neigh->holdtime, &neigh->t_expire_timer);
}
-static void on_neighbor_jp_timer(struct thread *t)
+static void on_neighbor_jp_timer(struct event *t)
{
- struct pim_neighbor *neigh = THREAD_ARG(t);
+ struct pim_neighbor *neigh = EVENT_ARG(t);
struct pim_rpf rpf;
if (PIM_DEBUG_PIM_TRACE)
rpf.rpf_addr = neigh->source_addr;
pim_joinprune_send(&rpf, neigh->upstream_jp_agg);
- thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
- router->t_periodic, &neigh->jp_timer);
+ event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+ router->t_periodic, &neigh->jp_timer);
}
static void pim_neighbor_start_jp_timer(struct pim_neighbor *neigh)
{
- THREAD_OFF(neigh->jp_timer);
- thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
- router->t_periodic, &neigh->jp_timer);
+ EVENT_OFF(neigh->jp_timer);
+ event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+ router->t_periodic, &neigh->jp_timer);
}
static struct pim_neighbor *
delete_prefix_list(neigh);
list_delete(&neigh->upstream_jp_agg);
- THREAD_OFF(neigh->jp_timer);
+ EVENT_OFF(neigh->jp_timer);
bfd_sess_free(&neigh->bfd_session);
zlog_notice("PIM NEIGHBOR DOWN: neighbor %pPA on interface %s: %s",
&neigh->source_addr, ifp->name, delete_message);
- THREAD_OFF(neigh->t_expire_timer);
+ EVENT_OFF(neigh->t_expire_timer);
pim_if_assert_on_neighbor_down(ifp, neigh->source_addr);
uint32_t dr_priority;
uint32_t generation_id;
struct list *prefix_list; /* list of struct prefix */
- struct thread *t_expire_timer;
+ struct event *t_expire_timer;
struct interface *interface;
- struct thread *jp_timer;
+ struct event *jp_timer;
struct list *upstream_jp_agg;
struct bfd_session_params *bfd_session;
};
#include <zebra.h>
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "if.h"
#include "network.h"
#include "pim_bsm.h"
#include <lib/lib_errors.h>
-static void on_pim_hello_send(struct thread *t);
+static void on_pim_hello_send(struct event *t);
static const char *pim_pim_msgtype2str(enum pim_msg_type type)
{
pim_ifp->pim_sock_fd, ifp->name);
}
}
- THREAD_OFF(pim_ifp->t_pim_sock_read);
+ EVENT_OFF(pim_ifp->t_pim_sock_read);
if (PIM_DEBUG_PIM_TRACE) {
if (pim_ifp->t_pim_hello_timer) {
ifp->name);
}
}
- THREAD_OFF(pim_ifp->t_pim_hello_timer);
+ EVENT_OFF(pim_ifp->t_pim_hello_timer);
if (PIM_DEBUG_PIM_TRACE) {
zlog_debug("Deleting PIM socket fd=%d on interface %s",
static void pim_sock_read_on(struct interface *ifp);
-static void pim_sock_read(struct thread *t)
+static void pim_sock_read(struct event *t)
{
struct interface *ifp, *orig_ifp;
struct pim_interface *pim_ifp;
static long long count = 0;
int cont = 1;
- orig_ifp = ifp = THREAD_ARG(t);
- fd = THREAD_FD(t);
+ orig_ifp = ifp = EVENT_ARG(t);
+ fd = EVENT_FD(t);
pim_ifp = ifp->info;
zlog_debug("Scheduling READ event on PIM socket fd=%d",
pim_ifp->pim_sock_fd);
}
- thread_add_read(router->master, pim_sock_read, ifp,
- pim_ifp->pim_sock_fd, &pim_ifp->t_pim_sock_read);
+ event_add_read(router->master, pim_sock_read, ifp, pim_ifp->pim_sock_fd,
+ &pim_ifp->t_pim_sock_read);
}
static int pim_sock_open(struct interface *ifp)
zlog_debug("Rescheduling %d sec hello on interface %s",
pim_ifp->pim_hello_period, ifp->name);
}
- THREAD_OFF(pim_ifp->t_pim_hello_timer);
- thread_add_timer(router->master, on_pim_hello_send, ifp,
- pim_ifp->pim_hello_period,
- &pim_ifp->t_pim_hello_timer);
+ EVENT_OFF(pim_ifp->t_pim_hello_timer);
+ event_add_timer(router->master, on_pim_hello_send, ifp,
+ pim_ifp->pim_hello_period, &pim_ifp->t_pim_hello_timer);
}
/*
Periodic hello timer
*/
-static void on_pim_hello_send(struct thread *t)
+static void on_pim_hello_send(struct event *t)
{
struct pim_interface *pim_ifp;
struct interface *ifp;
- ifp = THREAD_ARG(t);
+ ifp = EVENT_ARG(t);
pim_ifp = ifp->info;
/*
return;
}
- THREAD_OFF(pim_ifp->t_pim_hello_timer);
+ EVENT_OFF(pim_ifp->t_pim_hello_timer);
}
random_msec = triggered_hello_delay_msec;
random_msec, ifp->name);
}
- thread_add_timer_msec(router->master, on_pim_hello_send, ifp,
- random_msec, &pim_ifp->t_pim_hello_timer);
+ event_add_timer_msec(router->master, on_pim_hello_send, ifp,
+ random_msec, &pim_ifp->t_pim_hello_timer);
}
int pim_sock_add(struct interface *ifp)
#include "log.h"
#include "if.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "vty.h"
#include "plist.h"
#include "pim_vxlan.h"
#include "pim_addr.h"
-struct thread *send_test_packet_timer = NULL;
+struct event *send_test_packet_timer = NULL;
void pim_register_join(struct pim_upstream *up)
{
&& (up->reg_state != PIM_REG_NOINFO)) {
pim_channel_del_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM, __func__);
- THREAD_OFF(up->t_rs_timer);
+ EVENT_OFF(up->t_rs_timer);
up->reg_state = PIM_REG_NOINFO;
}
}
list_delete(&pnc->rp_list);
- hash_clean(pnc->upstream_hash, NULL);
- hash_free(pnc->upstream_hash);
- pnc->upstream_hash = NULL;
+ hash_clean_and_free(&pnc->upstream_hash, NULL);
if (pnc->nexthop)
nexthops_free(pnc->nexthop);
{
assert(ss);
- THREAD_OFF(ss->t_sock_read);
+ EVENT_OFF(ss->t_sock_read);
if (close(ss->sock_fd)) {
zlog_warn(
return 0;
}
-static void ssmpingd_sock_read(struct thread *t)
+static void ssmpingd_sock_read(struct event *t)
{
struct ssmpingd_sock *ss;
- ss = THREAD_ARG(t);
+ ss = EVENT_ARG(t);
ssmpingd_read_msg(ss);
static void ssmpingd_read_on(struct ssmpingd_sock *ss)
{
- thread_add_read(router->master, ssmpingd_sock_read, ss, ss->sock_fd,
- &ss->t_sock_read);
+ event_add_read(router->master, ssmpingd_sock_read, ss, ss->sock_fd,
+ &ss->t_sock_read);
}
static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
struct pim_instance *pim;
int sock_fd; /* socket */
- struct thread *t_sock_read; /* thread for reading socket */
+ struct event *t_sock_read; /* thread for reading socket */
pim_addr source_addr; /* source address */
int64_t creation; /* timestamp of socket creation */
int64_t requests; /* counter */
#include <time.h>
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "lib_errors.h"
#include "pim_time.h"
return wr != 8;
}
-void pim_time_timer_to_mmss(char *buf, int buf_size, struct thread *t_timer)
+void pim_time_timer_to_mmss(char *buf, int buf_size, struct event *t_timer)
{
if (t_timer) {
pim_time_mmss(buf, buf_size,
- thread_timer_remain_second(t_timer));
+ event_timer_remain_second(t_timer));
} else {
snprintf(buf, buf_size, "--:--");
}
}
-void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct thread *t_timer)
+void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
if (t_timer) {
pim_time_hhmmss(buf, buf_size,
- thread_timer_remain_second(t_timer));
+ event_timer_remain_second(t_timer));
} else {
snprintf(buf, buf_size, "--:--:--");
}
snprintf(buf, buf_size, "--:--:--");
}
-long pim_time_timer_remain_msec(struct thread *t_timer)
+long pim_time_timer_remain_msec(struct event *t_timer)
{
/* no timer thread running means timer has expired: return 0 */
- return t_timer ? thread_timer_remain_msec(t_timer) : 0;
+ return t_timer ? event_timer_remain_msec(t_timer) : 0;
}
#include <stdint.h>
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
int64_t pim_time_monotonic_sec(void);
int64_t pim_time_monotonic_dsec(void);
int64_t pim_time_monotonic_usec(void);
int pim_time_mmss(char *buf, int buf_size, long sec);
-void pim_time_timer_to_mmss(char *buf, int buf_size, struct thread *t);
-void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct thread *t);
+void pim_time_timer_to_mmss(char *buf, int buf_size, struct event *t);
+void pim_time_timer_to_hhmmss(char *buf, int buf_size, struct event *t);
void pim_time_uptime(char *buf, int buf_size, int64_t uptime_sec);
void pim_time_uptime_begin(char *buf, int buf_size, int64_t now, int64_t begin);
-long pim_time_timer_remain_msec(struct thread *t_timer);
+long pim_time_timer_remain_msec(struct event *t_timer);
#endif /* PIM_TIME_H */
#include "log.h"
#include "zclient.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "linklist.h"
#include "vty.h"
#include "plist.h"
static void pim_upstream_timers_stop(struct pim_upstream *up)
{
- THREAD_OFF(up->t_ka_timer);
- THREAD_OFF(up->t_rs_timer);
- THREAD_OFF(up->t_msdp_reg_timer);
- THREAD_OFF(up->t_join_timer);
+ EVENT_OFF(up->t_ka_timer);
+ EVENT_OFF(up->t_rs_timer);
+ EVENT_OFF(up->t_msdp_reg_timer);
+ EVENT_OFF(up->t_join_timer);
}
struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
pim_jp_agg_single_upstream_send(&up->rpf, up, 1 /* join */);
}
-static void on_join_timer(struct thread *t)
+static void on_join_timer(struct event *t)
{
struct pim_upstream *up;
- up = THREAD_ARG(t);
+ up = EVENT_ARG(t);
if (!up->rpf.source_nexthop.interface) {
if (PIM_DEBUG_PIM_TRACE)
{
struct pim_neighbor *nbr = NULL;
- THREAD_OFF(up->t_join_timer);
+ EVENT_OFF(up->t_join_timer);
if (up->rpf.source_nexthop.interface)
nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
if (nbr)
pim_jp_agg_add_group(nbr->upstream_jp_agg, up, 1, nbr);
else {
- THREAD_OFF(up->t_join_timer);
- thread_add_timer(router->master, on_join_timer, up,
- router->t_periodic, &up->t_join_timer);
+ EVENT_OFF(up->t_join_timer);
+ event_add_timer(router->master, on_join_timer, up,
+ router->t_periodic, &up->t_join_timer);
}
pim_jp_agg_upstream_verification(up, true);
}
void pim_upstream_join_timer_restart(struct pim_upstream *up,
struct pim_rpf *old)
{
- // THREAD_OFF(up->t_join_timer);
+ // EVENT_OFF(up->t_join_timer);
join_timer_start(up);
}
__func__, interval_msec, up->sg_str);
}
- THREAD_OFF(up->t_join_timer);
- thread_add_timer_msec(router->master, on_join_timer, up, interval_msec,
- &up->t_join_timer);
+ EVENT_OFF(up->t_join_timer);
+ event_add_timer_msec(router->master, on_join_timer, up, interval_msec,
+ &up->t_join_timer);
}
void pim_update_suppress_timers(uint32_t suppress_time)
up->sg_str);
/* stop reg-stop timer */
- THREAD_OFF(up->t_rs_timer);
+ EVENT_OFF(up->t_rs_timer);
/* remove regiface from the OIL if it is there*/
pim_channel_del_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM, __func__);
return up;
}
-static void pim_upstream_keep_alive_timer(struct thread *t)
+static void pim_upstream_keep_alive_timer(struct event *t)
{
struct pim_upstream *up;
- up = THREAD_ARG(t);
+ up = EVENT_ARG(t);
/* pull the stats and re-check */
if (pim_upstream_sg_running_proc(up))
zlog_debug("kat start on %s with no stream reference",
up->sg_str);
}
- THREAD_OFF(up->t_ka_timer);
- thread_add_timer(router->master, pim_upstream_keep_alive_timer, up,
- time, &up->t_ka_timer);
+ EVENT_OFF(up->t_ka_timer);
+ event_add_timer(router->master, pim_upstream_keep_alive_timer, up, time,
+ &up->t_ka_timer);
/* any time keepalive is started against a SG we will have to
* re-evaluate our active source database */
}
/* MSDP on RP needs to know if a source is registerable to this RP */
-static void pim_upstream_msdp_reg_timer(struct thread *t)
+static void pim_upstream_msdp_reg_timer(struct event *t)
{
- struct pim_upstream *up = THREAD_ARG(t);
+ struct pim_upstream *up = EVENT_ARG(t);
struct pim_instance *pim = up->channel_oil->pim;
/* source is no longer active - pull the SA from MSDP's cache */
void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
{
- THREAD_OFF(up->t_msdp_reg_timer);
- thread_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
- PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
+ EVENT_OFF(up->t_msdp_reg_timer);
+ event_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
+ PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
pim_msdp_sa_local_update(up);
}
return state_str;
}
-static void pim_upstream_register_stop_timer(struct thread *t)
+static void pim_upstream_register_stop_timer(struct event *t)
{
struct pim_interface *pim_ifp;
struct pim_instance *pim;
struct pim_upstream *up;
- up = THREAD_ARG(t);
+ up = EVENT_ARG(t);
pim = up->channel_oil->pim;
if (PIM_DEBUG_PIM_TRACE) {
{
uint32_t time;
- THREAD_OFF(up->t_rs_timer);
+ EVENT_OFF(up->t_rs_timer);
if (!null_register) {
uint32_t lower = (0.5 * router->register_suppress_time);
"%s: (S,G)=%s Starting upstream register stop timer %d",
__func__, up->sg_str, time);
}
- thread_add_timer(router->master, pim_upstream_register_stop_timer, up,
- time, &up->t_rs_timer);
+ event_add_timer(router->master, pim_upstream_register_stop_timer, up,
+ time, &up->t_rs_timer);
}
int pim_upstream_inherited_olist_decide(struct pim_instance *pim,
struct pim_up_mlag mlag;
- struct thread *t_join_timer;
+ struct event *t_join_timer;
/*
* RST(S,G)
*/
- struct thread *t_rs_timer;
+ struct event *t_rs_timer;
#define PIM_REGISTER_SUPPRESSION_PERIOD (60)
#define PIM_REGISTER_PROBE_PERIOD (5)
/*
* KAT(S,G)
*/
- struct thread *t_ka_timer;
+ struct event *t_ka_timer;
#define PIM_KEEPALIVE_PERIOD (210)
#define PIM_RP_KEEPALIVE_PERIOD \
(3 * router->register_suppress_time + router->register_probe_time)
/* on the RP we restart a timer to indicate if registers are being rxed
* for
* SG. This is needed by MSDP to determine its local SA cache */
- struct thread *t_msdp_reg_timer;
+ struct event *t_msdp_reg_timer;
#define PIM_MSDP_REG_RXED_PERIOD (3 * (1.5 * router->register_suppress_time))
int64_t state_transition; /* Record current state uptime */
pim_vxlan_del_work(vxlan_sg);
}
-static void pim_vxlan_work_timer_cb(struct thread *t)
+static void pim_vxlan_work_timer_cb(struct event *t)
{
pim_vxlan_do_reg_work();
pim_vxlan_work_timer_setup(true /* start */);
/* global 1second timer used for periodic processing */
static void pim_vxlan_work_timer_setup(bool start)
{
- THREAD_OFF(vxlan_info.work_timer);
+ EVENT_OFF(vxlan_info.work_timer);
if (start)
- thread_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
- PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
+ event_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
+ PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
}
/**************************** vxlan origination mroutes ***********************
* if there are no other references.
*/
if (PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
- THREAD_OFF(up->t_ka_timer);
+ EVENT_OFF(up->t_ka_timer);
up = pim_upstream_keep_alive_timer_proc(up);
} else {
/* this is really unexpected as we force vxlan
void pim_vxlan_exit(struct pim_instance *pim)
{
- if (pim->vxlan.sg_hash) {
- hash_clean(pim->vxlan.sg_hash,
- (void (*)(void *))pim_vxlan_sg_del_item);
- hash_free(pim->vxlan.sg_hash);
- pim->vxlan.sg_hash = NULL;
- }
+ hash_clean_and_free(&pim->vxlan.sg_hash,
+ (void (*)(void *))pim_vxlan_sg_del_item);
}
void pim_vxlan_terminate(void)
struct pim_vxlan {
enum pim_vxlan_flags flags;
- struct thread *work_timer;
+ struct event *work_timer;
struct list *work_list;
struct listnode *next_work;
int max_work_cnt;
{
struct interface *ifp;
vrf_id_t new_vrf_id;
+ struct pim_instance *pim;
+ struct pim_interface *pim_ifp;
ifp = zebra_interface_vrf_update_read(zclient->ibuf, vrf_id,
&new_vrf_id);
zlog_debug("%s: %s updating from %u to %u", __func__, ifp->name,
vrf_id, new_vrf_id);
+ pim = pim_get_pim_instance(new_vrf_id);
+
if_update_to_new_vrf(ifp, new_vrf_id);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return 0;
+
+ pim_ifp->pim->mcast_if_count--;
+ pim_ifp->pim = pim;
+ pim_ifp->pim->mcast_if_count++;
+
return 0;
}
pim_upstream_mroute_iif_update(c_oil, __func__);
}
-static void on_rpf_cache_refresh(struct thread *t)
+static void on_rpf_cache_refresh(struct event *t)
{
- struct pim_instance *pim = THREAD_ARG(t);
+ struct pim_instance *pim = EVENT_ARG(t);
/* update kernel multicast forwarding cache (MFC) */
pim_scan_oil(pim);
router->rpf_cache_refresh_delay_msec);
}
- thread_add_timer_msec(router->master, on_rpf_cache_refresh, pim,
- router->rpf_cache_refresh_delay_msec,
- &pim->rpf_cache_refresher);
+ event_add_timer_msec(router->master, on_rpf_cache_refresh, pim,
+ router->rpf_cache_refresh_delay_msec,
+ &pim->rpf_cache_refresher);
}
static void pim_zebra_connected(struct zclient *zclient)
#include "zclient.h"
#include "stream.h"
#include "network.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "vty.h"
#include "lib_errors.h"
#include "pim_addr.h"
static struct zclient *zlookup = NULL;
-struct thread *zlookup_read;
+struct event *zlookup_read;
static void zclient_lookup_sched(struct zclient *zlookup, int delay);
-static void zclient_lookup_read_pipe(struct thread *thread);
+static void zclient_lookup_read_pipe(struct event *thread);
/* Connect to zebra for nexthop lookup. */
-static void zclient_lookup_connect(struct thread *t)
+static void zclient_lookup_connect(struct event *t)
{
struct zclient *zlookup;
- zlookup = THREAD_ARG(t);
+ zlookup = EVENT_ARG(t);
if (zlookup->sock >= 0) {
return;
return;
}
- thread_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
- &zlookup_read);
+ event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
+ &zlookup_read);
}
/* Schedule connection with delay. */
static void zclient_lookup_sched(struct zclient *zlookup, int delay)
{
- thread_add_timer(router->master, zclient_lookup_connect, zlookup, delay,
- &zlookup->t_connect);
+ event_add_timer(router->master, zclient_lookup_connect, zlookup, delay,
+ &zlookup->t_connect);
zlog_notice("%s: zclient lookup connection scheduled for %d seconds",
__func__, delay);
/* Schedule connection for now. */
static void zclient_lookup_sched_now(struct zclient *zlookup)
{
- thread_add_event(router->master, zclient_lookup_connect, zlookup, 0,
- &zlookup->t_connect);
+ event_add_event(router->master, zclient_lookup_connect, zlookup, 0,
+ &zlookup->t_connect);
zlog_notice("%s: zclient lookup immediate connection scheduled",
__func__);
void zclient_lookup_free(void)
{
- THREAD_OFF(zlookup_read);
+ EVENT_OFF(zlookup_read);
zclient_stop(zlookup);
zclient_free(zlookup);
zlookup = NULL;
return zclient_read_nexthop(pim, zlookup, nexthop_tab, tab_size, addr);
}
-void zclient_lookup_read_pipe(struct thread *thread)
+void zclient_lookup_read_pipe(struct event *thread)
{
- struct zclient *zlookup = THREAD_ARG(thread);
+ struct zclient *zlookup = EVENT_ARG(thread);
struct pim_instance *pim = pim_get_pim_instance(VRF_DEFAULT);
struct pim_zlookup_nexthop nexthop_tab[10];
pim_addr l = PIMADDR_ANY;
}
zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l);
- thread_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
- &zlookup_read);
+ event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
+ &zlookup_read);
}
int zclient_lookup_nexthop(struct pim_instance *pim,
 * This thread reads the clients' data from the Global queue, encodes it with
 * protobuf and passes it on to the MLAG socket.
*/
-static void pim_mlag_zthread_handler(struct thread *event)
+static void pim_mlag_zthread_handler(struct event *event)
{
struct stream *read_s;
uint32_t wr_count = 0;
if (PIM_DEBUG_MLAG)
zlog_debug(":%s: Scheduling PIM MLAG write Thread",
__func__);
- thread_add_event(router->master, pim_mlag_zthread_handler, NULL,
- 0, &router->zpthread_mlag_write);
+ event_add_event(router->master, pim_mlag_zthread_handler, NULL,
+ 0, &router->zpthread_mlag_write);
}
return (0);
}
--- /dev/null
+#!/bin/sh
+#
+# mgmtd is part of the quagga routing beast
+#
+# PROVIDE: mgmtd
+# REQUIRE: none
+##
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin:@prefix@/sbin:@prefix@/bin
+export PATH
+
+if [ -f /etc/rc.subr ]
+then
+ . /etc/rc.subr
+fi
+
+name="mgmtd"
+rcvar=$name
+required_files="@sysconfdir@/${name}.conf"
+command="@prefix@/sbin/${name}"
+command_args="-d"
+
+start_precmd="zebra_precmd"
+socket_dir=@localstatedir@
+pidfile="${socket_dir}/${name}.pid"
+
+zebra_precmd()
+{
+ rc_flags="$(
+ set -- $rc_flags
+ while [ $# -ne 0 ]; do
+ if [ X"$1" = X-P -o X"$1" = X-A ]; then
+ break
+ fi
+ shift
+ done
+ if [ $# -eq 0 ]; then
+ echo "-P 0"
+ fi
+ ) $rc_flags"
+}
+
+load_rc_config $name
+run_rc_command "$1"
"lib/routemap.c": "VTYSH_RMAP",
"lib/routemap_cli.c": "VTYSH_RMAP",
"lib/spf_backoff.c": "VTYSH_ISISD",
- "lib/thread.c": "VTYSH_ALL",
+ "lib/event.c": "VTYSH_ALL",
"lib/vrf.c": "VTYSH_VRF",
"lib/vty.c": "VTYSH_ALL",
}
def load(cls, xref):
nodes = NodeDict()
+ mgmtname = "mgmtd/libmgmt_be_nb.la"
for cmd_name, origins in xref.get("cli", {}).items():
+ # If mgmtd has a yang version of a CLI command, make it the only daemon
+ # to handle it. For now, daemons can still be compiling their cmds into the
+ # binaries to allow for running standalone with CLI config files. When they
+ # do this they will also be present in the xref file, but we want to ignore
+ # those in vtysh.
+ if "yang" in origins.get(mgmtname, {}).get("attrs", []):
+ CommandEntry.process(nodes, cmd_name, mgmtname, origins[mgmtname])
+ continue
+
for origin, spec in origins.items():
CommandEntry.process(nodes, cmd_name, origin, spec)
return nodes
# constants, need to be kept in sync manually...
-XREFT_THREADSCHED = 0x100
+XREFT_EVENTSCHED = 0x100
XREFT_LOGMSG = 0x200
XREFT_DEFUN = 0x300
XREFT_INSTALL_ELEMENT = 0x301
struct = "xref_threadsched"
-Xref.containers[XREFT_THREADSCHED] = XrefThreadSched
+Xref.containers[XREFT_EVENTSCHED] = XrefThreadSched
class XrefLogmsg(ELFDissectStruct, XrelfoJson):
# end
EXTRA_DIST += qpb/qpb.proto
-SUFFIXES += .proto .pb-c.c .pb-c.h
-
-if HAVE_PROTOBUF
-
-# Rules
-.proto.pb.h:
- $(PROTOC) -I$(top_srcdir) --cpp_out=$(top_builddir) $^
-
-AM_V_PROTOC_C = $(am__v_PROTOC_C_$(V))
-am__v_PROTOC_C_ = $(am__v_PROTOC_C_$(AM_DEFAULT_VERBOSITY))
-am__v_PROTOC_C_0 = @echo " PROTOC_C" $@;
-am__v_PROTOC_C_1 =
-
-.proto.pb-c.c:
- $(AM_V_PROTOC_C)$(PROTOC_C) -I$(top_srcdir) --c_out=$(top_builddir) $^
- $(AM_V_GEN)$(SED) -e '1i#include "config.h"' -i $@
-.pb-c.c.pb-c.h:
- @/bin/true
-
-endif # HAVE_PROTOBUF
%{_sbindir}/ospfd
%{_sbindir}/ripd
%{_sbindir}/bgpd
+%{_sbindir}/mgmtd
%exclude %{_sbindir}/ssd
%if %{with_watchfrr}
%{_sbindir}/watchfrr
%{_libdir}/frr/modules/dplane_fpm_nl.so
%{_libdir}/frr/modules/zebra_irdp.so
%{_libdir}/frr/modules/bgpd_bmp.so
+%{_libdir}/libfrr_pb.so*
+%{_libdir}/libfrrfpm_pb.so*
+%{_libdir}/libmgmt_be_nb.so*
%{_bindir}/*
%config(noreplace) %{configdir}/[!v]*.conf*
%config(noreplace) %attr(750,%{frr_user},%{frr_user}) %{configdir}/daemons
%{_libdir}/lib*.so
%dir %{_includedir}/%{name}
%{_includedir}/%{name}/*.h
+%dir %{_includedir}/%{name}/mgmtd
+%{_includedir}/%{name}/mgmtd/*.h
%dir %{_includedir}/%{name}/ospfd
%{_includedir}/%{name}/ospfd/*.h
%if %{with_bfdd}
#include "table.h"
#include "log.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "zclient.h"
#include "filter.h"
#include "sockopt.h"
ri->enable_interface = 0;
ri->running = 0;
- THREAD_OFF(ri->t_wakeup);
+ EVENT_OFF(ri->t_wakeup);
}
void rip_interfaces_clean(struct rip *rip)
ri = ifp->info;
- THREAD_OFF(ri->t_wakeup);
+ EVENT_OFF(ri->t_wakeup);
rip = ri->rip;
if (rip) {
}
/* Join to multicast group and send request to the interface. */
-static void rip_interface_wakeup(struct thread *t)
+static void rip_interface_wakeup(struct event *t)
{
struct interface *ifp;
struct rip_interface *ri;
/* Get interface. */
- ifp = THREAD_ARG(t);
+ ifp = EVENT_ARG(t);
ri = ifp->info;
zlog_debug("turn on %s", ifp->name);
/* Add interface wake up thread. */
- thread_add_timer(master, rip_interface_wakeup, ifp, 1,
- &ri->t_wakeup);
+ event_add_timer(master, rip_interface_wakeup, ifp, 1,
+ &ri->t_wakeup);
rip_connect_set(ifp, 1);
} else if (ri->running) {
/* Might as well clean up the route table as well
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "memory.h"
#include "prefix.h"
.cap_num_i = 0};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
static struct frr_daemon_info ripd_di;
}
if (rinfo) {
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
rip_info_free(rinfo);
}
#include "prefix.h"
#include "command.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "ripd/ripd.h"
static void rip_peer_free(struct rip_peer *peer)
{
- THREAD_OFF(peer->t_timeout);
+ EVENT_OFF(peer->t_timeout);
XFREE(MTYPE_RIP_PEER, peer);
}
}
/* RIP peer is timeout. */
-static void rip_peer_timeout(struct thread *t)
+static void rip_peer_timeout(struct event *t)
{
struct rip_peer *peer;
- peer = THREAD_ARG(t);
+ peer = EVENT_ARG(t);
listnode_delete(peer->rip->peer_list, peer);
rip_peer_free(peer);
}
peer = rip_peer_lookup(rip, addr);
if (peer) {
- THREAD_OFF(peer->t_timeout);
+ EVENT_OFF(peer->t_timeout);
} else {
peer = rip_peer_new();
peer->rip = rip;
}
/* Update timeout thread. */
- thread_add_timer(master, rip_peer_timeout, peer, RIP_PEER_TIMER_DEFAULT,
- &peer->t_timeout);
+ event_add_timer(master, rip_peer_timeout, peer, RIP_PEER_TIMER_DEFAULT,
+ &peer->t_timeout);
/* Last update time set. */
time(&peer->uptime);
{RIP2PEERRCVBADPACKETS, COUNTER, RONLY, rip2PeerTable, 3, {4, 1, 5}},
{RIP2PEERRCVBADROUTES, COUNTER, RONLY, rip2PeerTable, 3, {4, 1, 6}}};
-extern struct thread_master *master;
+extern struct event_loop *master;
static uint8_t *rip2Globals(struct variable *v, oid name[], size_t *length,
int exact, size_t *var_len,
}
/* Register RIPv2-MIB. */
-static int rip_snmp_init(struct thread_master *master)
+static int rip_snmp_init(struct event_loop *master)
{
rip_ifaddr_table = route_table_init();
[ZEBRA_REDISTRIBUTE_ROUTE_DEL] = rip_zebra_read_route,
};
-void rip_zclient_init(struct thread_master *master)
+void rip_zclient_init(struct event_loop *master)
{
/* Set default value to the zebra client structure. */
zclient = zclient_new(master, &zclient_options_default, rip_handlers,
#include "command.h"
#include "prefix.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "log.h"
#include "stream.h"
/* Prototypes. */
static void rip_output_process(struct connected *, struct sockaddr_in *, int,
uint8_t);
-static void rip_triggered_update(struct thread *);
+static void rip_triggered_update(struct event *);
static int rip_update_jitter(unsigned long);
static void rip_distance_table_node_cleanup(struct route_table *table,
struct route_node *node);
}
/* RIP route garbage collect timer. */
-static void rip_garbage_collect(struct thread *t)
+static void rip_garbage_collect(struct event *t)
{
struct rip_info *rinfo;
struct route_node *rp;
- rinfo = THREAD_ARG(t);
+ rinfo = EVENT_ARG(t);
/* Off timeout timer. */
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
/* Get route_node pointer. */
rp = rinfo->rp;
if (tmp_rinfo == rinfo)
continue;
- THREAD_OFF(tmp_rinfo->t_timeout);
- THREAD_OFF(tmp_rinfo->t_garbage_collect);
+ EVENT_OFF(tmp_rinfo->t_timeout);
+ EVENT_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
rip_info_free(tmp_rinfo);
}
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
if (rip_route_rte(rinfo)) {
struct route_node *rp = rinfo->rp;
struct list *list = (struct list *)rp->info;
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
if (listcount(list) > 1) {
/* Some other ECMP entries still exist. Just delete this entry.
*/
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
if (rip_route_rte(rinfo)
&& CHECK_FLAG(rinfo->flags, RIP_RTF_FIB))
}
/* Timeout RIP routes. */
-static void rip_timeout(struct thread *t)
+static void rip_timeout(struct event *t)
{
- struct rip_info *rinfo = THREAD_ARG(t);
+ struct rip_info *rinfo = EVENT_ARG(t);
struct rip *rip = rip_info_get_instance(rinfo);
rip_ecmp_delete(rip, rinfo);
static void rip_timeout_update(struct rip *rip, struct rip_info *rinfo)
{
if (rinfo->metric != RIP_METRIC_INFINITY) {
- THREAD_OFF(rinfo->t_timeout);
- thread_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
- &rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
+ event_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
+ &rinfo->t_timeout);
}
}
assert(newinfo.metric
!= RIP_METRIC_INFINITY);
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, &newinfo,
sizeof(struct rip_info));
rip_timeout_update(rip, rinfo);
RIP_TIMER_ON(rinfo->t_garbage_collect,
rip_garbage_collect,
rip->garbage_time);
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
rinfo->flags |= RIP_RTF_CHANGED;
if (IS_RIP_DEBUG_EVENT)
}
/* First entry point of RIP packet. */
-static void rip_read(struct thread *t)
+static void rip_read(struct event *t)
{
- struct rip *rip = THREAD_ARG(t);
+ struct rip *rip = EVENT_ARG(t);
int sock;
int ret;
int rtenum;
struct prefix p;
/* Fetch socket then register myself. */
- sock = THREAD_FD(t);
+ sock = EVENT_FD(t);
/* Add myself to tne next event */
rip_event(rip, RIP_READ, sock);
}
/* RIP's periodical timer. */
-static void rip_update(struct thread *t)
+static void rip_update(struct event *t)
{
- struct rip *rip = THREAD_ARG(t);
+ struct rip *rip = EVENT_ARG(t);
if (IS_RIP_DEBUG_EVENT)
zlog_debug("update timer fire!");
/* Triggered updates may be suppressed if a regular update is due by
the time the triggered update would be sent. */
- THREAD_OFF(rip->t_triggered_interval);
+ EVENT_OFF(rip->t_triggered_interval);
rip->trigger = 0;
/* Register myself. */
}
/* Triggered update interval timer. */
-static void rip_triggered_interval(struct thread *t)
+static void rip_triggered_interval(struct event *t)
{
- struct rip *rip = THREAD_ARG(t);
+ struct rip *rip = EVENT_ARG(t);
if (rip->trigger) {
rip->trigger = 0;
}
/* Execute triggered update. */
-static void rip_triggered_update(struct thread *t)
+static void rip_triggered_update(struct event *t)
{
- struct rip *rip = THREAD_ARG(t);
+ struct rip *rip = EVENT_ARG(t);
int interval;
/* Cancel interval timer. */
- THREAD_OFF(rip->t_triggered_interval);
+ EVENT_OFF(rip->t_triggered_interval);
rip->trigger = 0;
/* Logging triggered update. */
update is triggered when the timer expires. */
interval = (frr_weak_random() % 5) + 1;
- thread_add_timer(master, rip_triggered_interval, rip, interval,
- &rip->t_triggered_interval);
+ event_add_timer(master, rip_triggered_interval, rip, interval,
+ &rip->t_triggered_interval);
}
/* Withdraw redistributed route. */
rinfo->metric = RIP_METRIC_INFINITY;
RIP_TIMER_ON(rinfo->t_garbage_collect, rip_garbage_collect,
rip->garbage_time);
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
rinfo->flags |= RIP_RTF_CHANGED;
if (IS_RIP_DEBUG_EVENT) {
switch (event) {
case RIP_READ:
- thread_add_read(master, rip_read, rip, sock, &rip->t_read);
+ event_add_read(master, rip_read, rip, sock, &rip->t_read);
break;
case RIP_UPDATE_EVENT:
- THREAD_OFF(rip->t_update);
+ EVENT_OFF(rip->t_update);
jitter = rip_update_jitter(rip->update_time);
- thread_add_timer(master, rip_update, rip,
- sock ? 2 : rip->update_time + jitter,
- &rip->t_update);
+ event_add_timer(master, rip_update, rip,
+ sock ? 2 : rip->update_time + jitter,
+ &rip->t_update);
break;
case RIP_TRIGGERED_UPDATE:
if (rip->t_triggered_interval)
rip->trigger = 1;
else
- thread_add_event(master, rip_triggered_update, rip, 0,
- &rip->t_triggered_update);
+ event_add_event(master, rip_triggered_update, rip, 0,
+ &rip->t_triggered_update);
break;
default:
break;
if (tmp_rinfo == rinfo)
continue;
- THREAD_OFF(tmp_rinfo->t_timeout);
- THREAD_OFF(tmp_rinfo->t_garbage_collect);
+ EVENT_OFF(tmp_rinfo->t_timeout);
+ EVENT_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
rip_info_free(tmp_rinfo);
}
struct tm tm;
#define TIME_BUF 25
char timebuf[TIME_BUF];
- struct thread *thread;
+ struct event *thread;
if ((thread = rinfo->t_timeout) != NULL) {
- clock = thread_timer_remain_second(thread);
+ clock = event_timer_remain_second(thread);
gmtime_r(&clock, &tm);
strftime(timebuf, TIME_BUF, "%M:%S", &tm);
vty_out(vty, "%5s", timebuf);
} else if ((thread = rinfo->t_garbage_collect) != NULL) {
- clock = thread_timer_remain_second(thread);
+ clock = event_timer_remain_second(thread);
gmtime_r(&clock, &tm);
strftime(timebuf, TIME_BUF, "%M:%S", &tm);
vty_out(vty, "%5s", timebuf);
vty_out(vty, " Sending updates every %u seconds with +/-50%%,",
rip->update_time);
vty_out(vty, " next due in %lu seconds\n",
- thread_timer_remain_second(rip->t_update));
+ event_timer_remain_second(rip->t_update));
vty_out(vty, " Timeout after %u seconds,", rip->timeout_time);
vty_out(vty, " garbage collect after %u seconds\n", rip->garbage_time);
rip_zebra_ipv4_delete(rip, rp);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
rip_info_free(rinfo);
}
list_delete(&list);
rip_redistribute_disable(rip);
/* Cancel RIP related timers. */
- THREAD_OFF(rip->t_update);
- THREAD_OFF(rip->t_triggered_update);
- THREAD_OFF(rip->t_triggered_interval);
+ EVENT_OFF(rip->t_update);
+ EVENT_OFF(rip->t_triggered_update);
+ EVENT_OFF(rip->t_triggered_interval);
/* Cancel read thread. */
- THREAD_OFF(rip->t_read);
+ EVENT_OFF(rip->t_read);
/* Close RIP socket. */
close(rip->sock);
struct list *peer_list;
/* RIP threads. */
- struct thread *t_read;
+ struct event *t_read;
/* Update and garbage timer. */
- struct thread *t_update;
+ struct event *t_update;
/* Triggered update hack. */
int trigger;
- struct thread *t_triggered_update;
- struct thread *t_triggered_interval;
+ struct event *t_triggered_update;
+ struct event *t_triggered_interval;
/* RIP timer values. */
uint32_t update_time;
uint8_t flags;
/* Garbage collect timer. */
- struct thread *t_timeout;
- struct thread *t_garbage_collect;
+ struct event *t_timeout;
+ struct event *t_garbage_collect;
/* Route-map futures - this variables can be changed. */
struct in_addr nexthop_out;
struct route_map *routemap[RIP_FILTER_MAX];
/* Wake up thread. */
- struct thread *t_wakeup;
+ struct event *t_wakeup;
/* Interface statistics. */
int recv_badpackets;
int recv_badroutes;
/* Timeout thread. */
- struct thread *t_timeout;
+ struct event *t_timeout;
};
struct rip_distance {
};
/* Macro for timer turn on. */
-#define RIP_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
+#define RIP_TIMER_ON(T, F, V) event_add_timer(master, (F), rinfo, (V), &(T))
#define RIP_OFFSET_LIST_IN 0
#define RIP_OFFSET_LIST_OUT 1
extern void rip_route_map_init(void);
extern void rip_zebra_vrf_register(struct vrf *vrf);
extern void rip_zebra_vrf_deregister(struct vrf *vrf);
-extern void rip_zclient_init(struct thread_master *);
+extern void rip_zclient_init(struct event_loop *e);
extern void rip_zclient_stop(void);
extern int if_check_address(struct rip *rip, struct in_addr addr);
extern struct rip *rip_lookup_by_vrf_id(vrf_id_t vrf_id);
extern struct rip_instance_head rip_instances;
/* Master thread structure. */
-extern struct thread_master *master;
+extern struct event_loop *master;
DECLARE_HOOK(rip_ifaddr_add, (struct connected * ifc), (ifc));
DECLARE_HOOK(rip_ifaddr_del, (struct connected * ifc), (ifc));
#include "zclient.h"
#include "command.h"
#include "agg_table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "vrf.h"
#include "lib_errors.h"
ri = ifp->info;
- THREAD_OFF(ri->t_wakeup);
+ EVENT_OFF(ri->t_wakeup);
ripng = ri->ripng;
ri->enable_interface = 0;
ri->running = 0;
- THREAD_OFF(ri->t_wakeup);
+ EVENT_OFF(ri->t_wakeup);
}
}
}
/* Wake up interface. */
-static void ripng_interface_wakeup(struct thread *t)
+static void ripng_interface_wakeup(struct event *t)
{
struct interface *ifp;
struct ripng_interface *ri;
/* Get interface. */
- ifp = THREAD_ARG(t);
+ ifp = EVENT_ARG(t);
ri = ifp->info;
zlog_info("RIPng INTERFACE ON %s", ifp->name);
/* Add interface wake up thread. */
- thread_add_timer(master, ripng_interface_wakeup, ifp, 1,
- &ri->t_wakeup);
+ event_add_timer(master, ripng_interface_wakeup, ifp, 1,
+ &ri->t_wakeup);
ripng_connect_set(ifp, 1);
} else {
#include "vty.h"
#include "command.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "log.h"
#include "prefix.h"
#include "if.h"
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
static struct frr_daemon_info ripngd_di;
}
if (rinfo) {
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
ripng_info_free(rinfo);
}
#include "prefix.h"
#include "command.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "ripngd/ripngd.h"
static void ripng_peer_free(struct ripng_peer *peer)
{
- THREAD_OFF(peer->t_timeout);
+ EVENT_OFF(peer->t_timeout);
XFREE(MTYPE_RIPNG_PEER, peer);
}
/* RIPng peer is timeout.
* Garbage collector.
**/
-static void ripng_peer_timeout(struct thread *t)
+static void ripng_peer_timeout(struct event *t)
{
struct ripng_peer *peer;
- peer = THREAD_ARG(t);
+ peer = EVENT_ARG(t);
listnode_delete(peer->ripng->peer_list, peer);
ripng_peer_free(peer);
}
peer = ripng_peer_lookup(ripng, addr);
if (peer) {
- THREAD_OFF(peer->t_timeout);
+ EVENT_OFF(peer->t_timeout);
} else {
peer = ripng_peer_new();
peer->ripng = ripng;
}
/* Update timeout thread. */
- thread_add_timer(master, ripng_peer_timeout, peer,
- RIPNG_PEER_TIMER_DEFAULT, &peer->t_timeout);
+ event_add_timer(master, ripng_peer_timeout, peer,
+ RIPNG_PEER_TIMER_DEFAULT, &peer->t_timeout);
/* Last update time set. */
time(&peer->uptime);
};
/* Initialize zebra structure and it's commands. */
-void zebra_init(struct thread_master *master)
+void zebra_init(struct event_loop *master)
{
/* Allocate zebra structure. */
zclient = zclient_new(master, &zclient_options_default, ripng_handlers,
#include "prefix.h"
#include "filter.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "memory.h"
#include "if.h"
#include "stream.h"
static void ripng_instance_enable(struct ripng *ripng, struct vrf *vrf,
int sock);
static void ripng_instance_disable(struct ripng *ripng);
-static void ripng_triggered_update(struct thread *);
+static void ripng_triggered_update(struct event *);
static void ripng_if_rmap_update(struct if_rmap_ctx *ctx,
struct if_rmap *if_rmap);
}
/* RIPng route garbage collect timer. */
-static void ripng_garbage_collect(struct thread *t)
+static void ripng_garbage_collect(struct event *t)
{
struct ripng_info *rinfo;
struct agg_node *rp;
- rinfo = THREAD_ARG(t);
+ rinfo = EVENT_ARG(t);
/* Off timeout timer. */
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
/* Get route_node pointer. */
rp = rinfo->rp;
/* Re-use the first entry, and delete the others. */
for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
if (tmp_rinfo != rinfo) {
- THREAD_OFF(tmp_rinfo->t_timeout);
- THREAD_OFF(tmp_rinfo->t_garbage_collect);
+ EVENT_OFF(tmp_rinfo->t_timeout);
+ EVENT_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
ripng_info_free(tmp_rinfo);
}
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, rinfo_new, sizeof(struct ripng_info));
if (ripng_route_rte(rinfo)) {
struct agg_node *rp = rinfo->rp;
struct list *list = (struct list *)rp->info;
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
if (rinfo->metric != RIPNG_METRIC_INFINITY)
ripng_aggregate_decrement(rp, rinfo);
if (listcount(list) > 1) {
/* Some other ECMP entries still exist. Just delete this entry.
*/
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
if (ripng_route_rte(rinfo)
&& CHECK_FLAG(rinfo->flags, RIPNG_RTF_FIB))
}
/* Timeout RIPng routes. */
-static void ripng_timeout(struct thread *t)
+static void ripng_timeout(struct event *t)
{
- struct ripng_info *rinfo = THREAD_ARG(t);
+ struct ripng_info *rinfo = EVENT_ARG(t);
struct ripng *ripng = ripng_info_get_instance(rinfo);
ripng_ecmp_delete(ripng, rinfo);
static void ripng_timeout_update(struct ripng *ripng, struct ripng_info *rinfo)
{
if (rinfo->metric != RIPNG_METRIC_INFINITY) {
- THREAD_OFF(rinfo->t_timeout);
- thread_add_timer(master, ripng_timeout, rinfo,
- ripng->timeout_time, &rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
+ event_add_timer(master, ripng_timeout, rinfo,
+ ripng->timeout_time, &rinfo->t_timeout);
}
}
* but
* highly recommended".
*/
- if (!ripng->ecmp && !same && rinfo->metric == rte->metric
- && rinfo->t_timeout
- && (thread_timer_remain_second(rinfo->t_timeout)
- < (ripng->timeout_time / 2))) {
+ if (!ripng->ecmp && !same && rinfo->metric == rte->metric &&
+ rinfo->t_timeout &&
+ (event_timer_remain_second(rinfo->t_timeout) <
+ (ripng->timeout_time / 2))) {
ripng_ecmp_replace(ripng, &newinfo);
}
/* Next, compare the metrics. If the datagram is from the same
router as the existing route, and the new metric is different
than the old one; or, if the new metric is lower than the old
one; do the following actions: */
- else if ((same && rinfo->metric != rte->metric)
- || rte->metric < rinfo->metric) {
+ else if ((same && rinfo->metric != rte->metric) ||
+ rte->metric < rinfo->metric) {
if (listcount(list) == 1) {
if (newinfo.metric != RIPNG_METRIC_INFINITY)
ripng_ecmp_replace(ripng, &newinfo);
RIPNG_TIMER_ON(rinfo->t_garbage_collect,
ripng_garbage_collect,
ripng->garbage_time);
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
/* Aggregate count decrement. */
ripng_aggregate_decrement(rp, rinfo);
RIPNG_TIMER_ON(rinfo->t_garbage_collect,
ripng_garbage_collect,
ripng->garbage_time);
- THREAD_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_timeout);
/* Aggregate count decrement. */
ripng_aggregate_decrement(rp, rinfo);
}
/* First entry point of reading RIPng packet. */
-static void ripng_read(struct thread *thread)
+static void ripng_read(struct event *thread)
{
- struct ripng *ripng = THREAD_ARG(thread);
+ struct ripng *ripng = EVENT_ARG(thread);
int len;
int sock;
struct sockaddr_in6 from;
/* Fetch thread data and set read pointer to empty for event
managing. `sock' sould be same as ripng->sock. */
- sock = THREAD_FD(thread);
+ sock = EVENT_FD(thread);
/* Add myself to the next event. */
ripng_event(ripng, RIPNG_READ, sock);
/* Regular update of RIPng route. Send all routing formation to RIPng
enabled interface. */
-static void ripng_update(struct thread *t)
+static void ripng_update(struct event *t)
{
- struct ripng *ripng = THREAD_ARG(t);
+ struct ripng *ripng = EVENT_ARG(t);
struct interface *ifp;
struct ripng_interface *ri;
/* Triggered updates may be suppressed if a regular update is due by
the time the triggered update would be sent. */
- THREAD_OFF(ripng->t_triggered_interval);
+ EVENT_OFF(ripng->t_triggered_interval);
ripng->trigger = 0;
/* Reset flush event. */
}
/* Triggered update interval timer. */
-static void ripng_triggered_interval(struct thread *t)
+static void ripng_triggered_interval(struct event *t)
{
- struct ripng *ripng = THREAD_ARG(t);
+ struct ripng *ripng = EVENT_ARG(t);
if (ripng->trigger) {
ripng->trigger = 0;
}
/* Execute triggered update. */
-void ripng_triggered_update(struct thread *t)
+void ripng_triggered_update(struct event *t)
{
- struct ripng *ripng = THREAD_ARG(t);
+ struct ripng *ripng = EVENT_ARG(t);
struct interface *ifp;
struct ripng_interface *ri;
int interval;
/* Cancel interval timer. */
- THREAD_OFF(ripng->t_triggered_interval);
+ EVENT_OFF(ripng->t_triggered_interval);
ripng->trigger = 0;
/* Logging triggered update. */
update is triggered when the timer expires. */
interval = (frr_weak_random() % 5) + 1;
- thread_add_timer(master, ripng_triggered_interval, ripng, interval,
- &ripng->t_triggered_interval);
+ event_add_timer(master, ripng_triggered_interval, ripng, interval,
+ &ripng->t_triggered_interval);
}
/* Write routing table entry to the stream and return next index of
switch (event) {
case RIPNG_READ:
- thread_add_read(master, ripng_read, ripng, sock,
- &ripng->t_read);
+ event_add_read(master, ripng_read, ripng, sock, &ripng->t_read);
break;
case RIPNG_UPDATE_EVENT:
- THREAD_OFF(ripng->t_update);
+ EVENT_OFF(ripng->t_update);
/* Update timer jitter. */
jitter = ripng_update_jitter(ripng->update_time);
- thread_add_timer(master, ripng_update, ripng,
- sock ? 2 : ripng->update_time + jitter,
- &ripng->t_update);
+ event_add_timer(master, ripng_update, ripng,
+ sock ? 2 : ripng->update_time + jitter,
+ &ripng->t_update);
break;
case RIPNG_TRIGGERED_UPDATE:
if (ripng->t_triggered_interval)
ripng->trigger = 1;
else
- thread_add_event(master, ripng_triggered_update, ripng,
- 0, &ripng->t_triggered_update);
+ event_add_event(master, ripng_triggered_update, ripng,
+ 0, &ripng->t_triggered_update);
break;
case RIPNG_ZEBRA:
case RIPNG_REQUEST_EVENT:
struct tm tm;
#define TIME_BUF 25
char timebuf[TIME_BUF];
- struct thread *thread;
+ struct event *thread;
if ((thread = rinfo->t_timeout) != NULL) {
- clock = thread_timer_remain_second(thread);
+ clock = event_timer_remain_second(thread);
gmtime_r(&clock, &tm);
strftime(timebuf, TIME_BUF, "%M:%S", &tm);
vty_out(vty, "%5s", timebuf);
} else if ((thread = rinfo->t_garbage_collect) != NULL) {
- clock = thread_timer_remain_second(thread);
+ clock = event_timer_remain_second(thread);
gmtime_r(&clock, &tm);
strftime(timebuf, TIME_BUF, "%M:%S", &tm);
vty_out(vty, "%5s", timebuf);
vty_out(vty, " Sending updates every %u seconds with +/-50%%,",
ripng->update_time);
vty_out(vty, " next due in %lu seconds\n",
- thread_timer_remain_second(ripng->t_update));
+ event_timer_remain_second(ripng->t_update));
vty_out(vty, " Timeout after %u seconds,", ripng->timeout_time);
vty_out(vty, " garbage collect after %u seconds\n",
ripng->garbage_time);
/* Drop all other entries, except the first one. */
for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
if (tmp_rinfo != rinfo) {
- THREAD_OFF(tmp_rinfo->t_timeout);
- THREAD_OFF(
- tmp_rinfo->t_garbage_collect);
+ EVENT_OFF(tmp_rinfo->t_timeout);
+ EVENT_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
ripng_info_free(tmp_rinfo);
}
ripng_zebra_ipv6_delete(ripng, rp);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- THREAD_OFF(rinfo->t_timeout);
- THREAD_OFF(rinfo->t_garbage_collect);
+ EVENT_OFF(rinfo->t_timeout);
+ EVENT_OFF(rinfo->t_garbage_collect);
ripng_info_free(rinfo);
}
list_delete(&list);
ripng_redistribute_disable(ripng);
/* Cancel the RIPng timers */
- THREAD_OFF(ripng->t_update);
- THREAD_OFF(ripng->t_triggered_update);
- THREAD_OFF(ripng->t_triggered_interval);
+ EVENT_OFF(ripng->t_update);
+ EVENT_OFF(ripng->t_triggered_update);
+ EVENT_OFF(ripng->t_triggered_interval);
/* Cancel the read thread */
- THREAD_OFF(ripng->t_read);
+ EVENT_OFF(ripng->t_read);
/* Close the RIPng socket */
if (ripng->sock >= 0) {
struct list *offset_list_master;
/* RIPng threads. */
- struct thread *t_read;
- struct thread *t_update;
+ struct event *t_read;
+ struct event *t_update;
/* Triggered update hack. */
int trigger;
- struct thread *t_triggered_update;
- struct thread *t_triggered_interval;
+ struct event *t_triggered_update;
+ struct event *t_triggered_interval;
/* RIPng ECMP flag */
bool ecmp;
uint8_t flags;
/* Garbage collect timer. */
- struct thread *t_timeout;
- struct thread *t_garbage_collect;
+ struct event *t_timeout;
+ struct event *t_garbage_collect;
/* Route-map features - this variables can be changed. */
struct in6_addr nexthop_out;
uint8_t default_only;
/* Wake up thread. */
- struct thread *t_wakeup;
+ struct event *t_wakeup;
/* Passive interface. */
int passive;
int recv_badroutes;
/* Timeout thread. */
- struct thread *t_timeout;
+ struct event *t_timeout;
};
/* All RIPng events. */
};
/* RIPng timer on/off macro. */
-#define RIPNG_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
+#define RIPNG_TIMER_ON(T, F, V) event_add_timer(master, (F), rinfo, (V), &(T))
#define RIPNG_OFFSET_LIST_IN 0
#define RIPNG_OFFSET_LIST_OUT 1
/* Extern variables. */
extern struct zebra_privs_t ripngd_privs;
-extern struct thread_master *master;
+extern struct event_loop *master;
extern struct ripng_instance_head ripng_instances;
/* Prototypes. */
extern void ripng_zebra_vrf_deregister(struct vrf *vrf);
extern void ripng_terminate(void);
/* zclient_init() is done by ripng_zebra.c:zebra_init() */
-extern void zebra_init(struct thread_master *);
+extern void zebra_init(struct event_loop *master);
extern void ripng_zebra_stop(void);
extern void ripng_redistribute_conf_update(struct ripng *ripng, int type);
extern void ripng_redistribute_conf_delete(struct ripng *ripng, int type);
#include "prefix.h"
#include "nexthop.h"
#include "log.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vrf.h"
#include "zclient.h"
#include "frr_pthread.h"
static struct rusage lp_rusage;
static struct vty *lp_vty;
-extern struct thread_master *master;
+extern struct event_loop *master;
-static void logpump_done(struct thread *thread)
+static void logpump_done(struct event *thread)
{
double x;
getrusage(RUSAGE_SELF, &lp_rusage);
#endif
- thread_add_timer_msec(master, logpump_done, NULL, 0, NULL);
+ event_add_timer_msec(master, logpump_done, NULL, 0, NULL);
return NULL;
}
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "prefix.h"
#include "linklist.h"
#include "if.h"
struct option longopts[] = {{0}};
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* SIGHUP handler. */
static void sighup(void)
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
struct zclient *zclient = NULL;
/* For registering threads. */
-extern struct thread_master *master;
+extern struct event_loop *master;
/* Privs info */
extern struct zebra_privs_t sharp_privs;
bfd_sess_install(sn->bsp);
}
-void static_bfd_initialize(struct zclient *zc, struct thread_master *tm)
+void static_bfd_initialize(struct zclient *zc, struct event_loop *tm)
{
/* Initialize BFD integration library. */
bfd_protocol_integration_init(zc, tm);
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "log.h"
#include "memory.h"
#include "static_debug.h"
#include "static_nb.h"
+#include "mgmt_be_client.h"
+
char backup_config_file[256];
bool mpls_enabled;
struct option longopts[] = { { 0 } };
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
+
+uintptr_t mgmt_lib_hndl;
static struct frr_daemon_info staticd_di;
/* SIGHUP handler. */
/* Disable BFD events to avoid wasting processing. */
bfd_protocol_integration_set_shutdown(true);
+ mgmt_be_client_lib_destroy(mgmt_lib_hndl);
+
static_vrf_terminate();
static_zebra_stop();
},
};
+static void static_mgmt_be_client_connect(uintptr_t lib_hndl,
+ uintptr_t usr_data, bool connected)
+{
+ (void)usr_data;
+
+ assert(lib_hndl == mgmt_lib_hndl);
+
+ zlog_debug("Got %s %s MGMTD Backend Client Server",
+ connected ? "connected" : "disconnected",
+ connected ? "to" : "from");
+
+ if (connected)
+ (void)mgmt_be_subscribe_yang_data(mgmt_lib_hndl, NULL, 0);
+}
+
+#if 0
+static void
+static_mgmt_txn_notify(uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_be_client_txn_ctx *txn_ctx,
+ bool destroyed)
+{
+ zlog_debug("Got Txn %s Notify from MGMTD server",
+ destroyed ? "DESTROY" : "CREATE");
+
+ if (!destroyed) {
+ /*
+ * TODO: Allocate and install a private scratchpad for this
+ * transaction if required
+ */
+ } else {
+ /*
+ * TODO: Uninstall and deallocate the private scratchpad for
+ * this transaction if installed earlier.
+ */
+ }
+}
+#endif
+
+static struct mgmt_be_client_params mgmt_params = {
+ .name = "staticd",
+ .conn_retry_intvl_sec = 3,
+ .client_connect_notify = static_mgmt_be_client_connect,
+ .txn_notify = NULL, /* static_mgmt_txn_notify */
+};
+
static const struct frr_yang_module_info *const staticd_yang_modules[] = {
&frr_filter_info,
&frr_interface_info,
static_zebra_init();
static_vty_init();
+ /* Initialize MGMT backend functionalities */
+ mgmt_lib_hndl = mgmt_be_client_lib_init(&mgmt_params, master);
+ assert(mgmt_lib_hndl);
+
hook_register(routing_conf_event,
routing_control_plane_protocols_name_validate);
static_ifindex_update_af(ifp, up, AFI_IP6, SAFI_MULTICAST);
}
-void static_get_nh_type(enum static_nh_type stype, char *type, size_t size)
-{
- switch (stype) {
- case STATIC_IFNAME:
- strlcpy(type, "ifindex", size);
- break;
- case STATIC_IPV4_GATEWAY:
- strlcpy(type, "ip4", size);
- break;
- case STATIC_IPV4_GATEWAY_IFNAME:
- strlcpy(type, "ip4-ifindex", size);
- break;
- case STATIC_BLACKHOLE:
- strlcpy(type, "blackhole", size);
- break;
- case STATIC_IPV6_GATEWAY:
- strlcpy(type, "ip6", size);
- break;
- case STATIC_IPV6_GATEWAY_IFNAME:
- strlcpy(type, "ip6-ifindex", size);
- break;
- };
-}
-
struct stable_info *static_get_stable_info(struct route_node *rn)
{
struct route_table *table;
return (struct static_route_info *)(rn->info);
}
+static inline void static_get_nh_type(enum static_nh_type stype, char *type,
+ size_t size)
+{
+ switch (stype) {
+ case STATIC_IFNAME:
+ strlcpy(type, "ifindex", size);
+ break;
+ case STATIC_IPV4_GATEWAY:
+ strlcpy(type, "ip4", size);
+ break;
+ case STATIC_IPV4_GATEWAY_IFNAME:
+ strlcpy(type, "ip4-ifindex", size);
+ break;
+ case STATIC_BLACKHOLE:
+ strlcpy(type, "blackhole", size);
+ break;
+ case STATIC_IPV6_GATEWAY:
+ strlcpy(type, "ip6", size);
+ break;
+ case STATIC_IPV6_GATEWAY_IFNAME:
+ strlcpy(type, "ip6-ifindex", size);
+ break;
+ }
+}
+
extern bool mpls_enabled;
extern uint32_t zebra_ecmp_count;
uint32_t table_id, uint8_t distance);
extern void static_del_path(struct static_path *pn);
-extern void static_get_nh_type(enum static_nh_type stype, char *type,
- size_t size);
extern bool static_add_nexthop_validate(const char *nh_vrf_name,
enum static_nh_type type,
struct ipaddr *ipaddr);
extern void static_next_hop_bfd_multi_hop(struct static_nexthop *sn, bool mhop);
/** Call this function after zebra client initialization. */
-extern void static_bfd_initialize(struct zclient *zc, struct thread_master *tm);
+extern void static_bfd_initialize(struct zclient *zc, struct event_loop *tm);
extern void static_bfd_show(struct vty *vty, bool isjson);
nb_cli_enqueue_change(vty, ab_xpath,
NB_OP_MODIFY, "false");
}
- if (type == STATIC_IPV4_GATEWAY
- || type == STATIC_IPV6_GATEWAY
- || type == STATIC_IPV4_GATEWAY_IFNAME
- || type == STATIC_IPV6_GATEWAY_IFNAME) {
+ if (type == STATIC_IPV4_GATEWAY ||
+ type == STATIC_IPV6_GATEWAY ||
+ type == STATIC_IPV4_GATEWAY_IFNAME ||
+ type == STATIC_IPV6_GATEWAY_IFNAME) {
strlcpy(ab_xpath, xpath_nexthop, sizeof(ab_xpath));
strlcat(ab_xpath, FRR_STATIC_ROUTE_NH_COLOR_XPATH,
sizeof(ab_xpath));
ret = nb_cli_apply_changes(vty, "%s", xpath_prefix);
} else {
- if (args->source)
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- buf_src_prefix, table_id, buf_nh_type,
- args->nexthop_vrf, buf_gate_str,
- args->interface_name);
- else
- snprintf(ab_xpath, sizeof(ab_xpath),
- FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
- "frr-staticd:staticd", "staticd", args->vrf,
- buf_prefix,
- yang_afi_safi_value2identity(args->afi,
- args->safi),
- table_id, buf_nh_type, args->nexthop_vrf,
- buf_gate_str, args->interface_name);
+ if (args->source) {
+ if (args->distance)
+ snprintf(ab_xpath, sizeof(ab_xpath),
+ FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH,
+ "frr-staticd:staticd", "staticd",
+ args->vrf, buf_prefix,
+ yang_afi_safi_value2identity(
+ args->afi, args->safi),
+ buf_src_prefix, table_id, distance,
+ buf_nh_type, args->nexthop_vrf,
+ buf_gate_str, args->interface_name);
+ else
+ snprintf(
+ ab_xpath, sizeof(ab_xpath),
+ FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
+ "frr-staticd:staticd", "staticd",
+ args->vrf, buf_prefix,
+ yang_afi_safi_value2identity(
+ args->afi, args->safi),
+ buf_src_prefix, table_id, buf_nh_type,
+ args->nexthop_vrf, buf_gate_str,
+ args->interface_name);
+ } else {
+ if (args->distance)
+ snprintf(ab_xpath, sizeof(ab_xpath),
+ FRR_DEL_S_ROUTE_NH_KEY_XPATH,
+ "frr-staticd:staticd", "staticd",
+ args->vrf, buf_prefix,
+ yang_afi_safi_value2identity(
+ args->afi, args->safi),
+ table_id, distance, buf_nh_type,
+ args->nexthop_vrf, buf_gate_str,
+ args->interface_name);
+ else
+ snprintf(
+ ab_xpath, sizeof(ab_xpath),
+ FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
+ "frr-staticd:staticd", "staticd",
+ args->vrf, buf_prefix,
+ yang_afi_safi_value2identity(
+ args->afi, args->safi),
+ table_id, buf_nh_type,
+ args->nexthop_vrf, buf_gate_str,
+ args->interface_name);
+ }
dnode = yang_dnode_get(vty->candidate_config->dnode, ab_xpath);
if (!dnode) {
"Debug route\n"
"Debug bfd\n")
{
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
/* If no specific category, change all */
if (strmatch(argv[argc - 1]->text, "static"))
static_debug_set(vty->node, !no, true, true, true);
else
static_debug_set(vty->node, !no, !!events, !!route, !!bfd);
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
return CMD_SUCCESS;
}
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
DEFPY(staticd_show_bfd_routes, staticd_show_bfd_routes_cmd,
"show bfd static route [json]$isjson",
SHOW_STR
.config_write = static_config_write_debug,
};
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
+
void static_vty_init(void)
{
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
install_node(&debug_node);
+ install_element(ENABLE_NODE, &show_debugging_static_cmd);
+ install_element(ENABLE_NODE, &staticd_show_bfd_routes_cmd);
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
install_element(CONFIG_NODE, &ip_mroute_dist_cmd);
install_element(CONFIG_NODE, &ipv6_route_cmd);
install_element(VRF_NODE, &ipv6_route_vrf_cmd);
- install_element(ENABLE_NODE, &show_debugging_static_cmd);
install_element(ENABLE_NODE, &debug_staticd_cmd);
install_element(CONFIG_NODE, &debug_staticd_cmd);
-
- install_element(ENABLE_NODE, &staticd_show_bfd_routes_cmd);
}
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "network.h"
#include "prefix.h"
extern "C" {
#endif
-extern struct thread_master *master;
+extern struct event_loop *master;
extern void static_zebra_nht_register(struct static_nexthop *nh, bool reg);
/* need these to link in libbgp */
struct zebra_privs_t bgpd_privs = {};
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
static int failed = 0;
{
int i = 0;
qobj_init();
- bgp_master_init(thread_master_create(NULL), BGP_SOCKET_SNDBUF_SIZE,
+ bgp_master_init(event_master_create(NULL), BGP_SOCKET_SNDBUF_SIZE,
list_new());
master = bm->master;
bgp_option_set(BGP_OPT_NO_LISTEN);
/* need these to link in libbgp */
struct zebra_privs_t bgpd_privs = {};
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
static int failed = 0;
static int tty = 0;
term_bgp_debug_as4 = -1UL;
qobj_init();
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new());
vrf_init(NULL, NULL, NULL, NULL);
bgp_option_set(BGP_OPT_NO_LISTEN);
/* need these to link in libbgp */
struct zebra_privs_t bgpd_privs = {};
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
static int failed = 0;
/* need these to link in libbgp */
struct zebra_privs_t bgpd_privs = {};
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
static int failed = 0;
static int tty = 0;
qobj_init();
cmd_init(0);
bgp_vty_init();
- master = thread_master_create("test mp attr");
+ master = event_master_create("test mp attr");
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new());
vrf_init(NULL, NULL, NULL, NULL);
bgp_option_set(BGP_OPT_NO_LISTEN);
};
/* need these to link in libbgp */
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
extern struct zclient *zclient;
struct zebra_privs_t bgpd_privs = {
.user = NULL,
static int global_test_init(void)
{
qobj_init();
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
zclient = zclient_new(master, &zclient_options_default, NULL, 0);
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new());
vrf_init(NULL, NULL, NULL, NULL);
{
if (zclient != NULL)
zclient_free(zclient);
- thread_master_free(master);
+ event_master_free(master);
return 0;
}
/* need these to link in libbgp */
struct zebra_privs_t bgpd_privs = {};
-struct thread_master *master = NULL;
+struct event_loop *master = NULL;
static struct bgp *bgp;
static as_t asn = 100;
{
struct peer *peer;
int i, j;
- struct thread t;
+ struct event t;
qobj_init();
bgp_attr_init();
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new());
vrf_init(NULL, NULL, NULL, NULL);
bgp_option_set(BGP_OPT_NO_LISTEN);
/* Required variables to link in libbgp */
struct zebra_privs_t bgpd_privs = {0};
-struct thread_master *master;
+struct event_loop *master;
enum test_state {
TEST_SUCCESS,
zprivs_preinit(&bgpd_privs);
zprivs_init(&bgpd_privs);
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
nb_init(master, NULL, 0, false);
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new());
bgp_option_set(BGP_OPT_NO_LISTEN);
nb_terminate();
yang_terminate();
zprivs_terminate(&bgpd_privs);
- thread_master_free(master);
+ event_master_free(master);
master = NULL;
}
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
extern void test_init(void);
-struct thread_master *master;
+struct event_loop *master;
struct option longopts[] = {{"daemon", no_argument, NULL, 'd'},
{"config_file", required_argument, NULL, 'f'},
}
static int timer_count;
-static void test_timer(struct thread *thread)
+static void test_timer(struct event *thread)
{
- int *count = THREAD_ARG(thread);
+ int *count = EVENT_ARG(thread);
printf("run %d of timer\n", (*count)++);
- thread_add_timer(master, test_timer, count, 5, NULL);
+ event_add_timer(master, test_timer, count, 5, NULL);
}
static void test_timer_init(void)
{
- thread_add_timer(master, test_timer, &timer_count, 10, NULL);
+ event_add_timer(master, test_timer, &timer_count, 10, NULL);
}
static void test_vty_init(void)
int vty_port = 4000;
int daemon_mode = 0;
char *progname;
- struct thread thread;
+ struct event thread;
char *config_file = NULL;
/* Set umask before anything for security */
progname = ((p = strrchr(argv[0], '/')) ? ++p : argv[0]);
/* master init. */
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
while (1) {
int opt;
test_init();
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* Not reached. */
exit(0);
#include "test_common.h"
-struct thread_master *master;
+struct event_loop *master;
struct zebra_privs_t isisd_privs;
int isis_sock_init(struct isis_circuit *circuit)
struct lspdb_head lspdb[]);
/* Global variables. */
-extern struct thread_master *master;
+extern struct event_loop *master;
extern struct zebra_privs_t isisd_privs;
extern struct isis_topology test_topologies[];
#include "memory.h"
#include "sbuf.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "isisd/isis_circuit.h"
#include "isisd/isis_tlvs.h"
#include <lib/version.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "log.h"
cmd_terminate();
vty_terminate();
yang_terminate();
- thread_master_free(master);
+ event_master_free(master);
log_memstats(stderr, "test-isis-spf");
if (!isexit)
{
char *p;
char *progname;
- struct thread thread;
+ struct event thread;
bool debug = false;
/* Set umask before anything for security */
}
/* master init. */
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
isis_master_init(master);
/* Library inits. */
vty_stdio(vty_do_exit);
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* Not reached. */
exit(0);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
#include "common_cli.h"
-struct thread_master *master;
+struct event_loop *master;
int dump_args(struct vty *vty, const char *descr, int argc,
struct cmd_token *argv[])
vty_terminate();
nb_terminate();
yang_terminate();
- thread_master_free(master);
+ event_master_free(master);
log_memstats(stderr, "testcli");
if (!isexit)
/* main routine. */
int main(int argc, char **argv)
{
- struct thread thread;
+ struct event thread;
size_t yangcount;
/* Set umask before anything for security */
umask(0027);
/* master init. */
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
zlog_aux_init("NONE: ", test_log_prio);
vty_stdio(vty_do_exit);
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* Not reached. */
exit(0);
/* functions provided by common cli
* (includes main())
*/
-extern struct thread_master *master;
+extern struct event_loop *master;
extern int test_log_prio;
extern struct cmd_node vty_node;
extern void test_init_cmd(void); /* provided in test-commands-defun.c */
-struct thread_master *master; /* dummy for libfrr*/
+struct event_loop *master; /* dummy for libfrr*/
static vector test_cmds;
static char test_buf[32768];
#include "lib/frr_pthread.h"
#include "lib/frratomic.h"
#include "lib/frrstr.h"
-#include "lib/getopt.h"
#include "lib/graph.h"
#include "lib/hash.h"
#include "lib/hook.h"
#include "lib/stream.h"
#include "lib/table.h"
#include "lib/termtable.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/typesafe.h"
#include "lib/typerb.h"
#include "lib/vector.h"
cfg->fd = fd;
cmd_hostname_set("TEST");
- cfg->master = thread_master_create("TEST");
+ cfg->master = event_master_create("TEST");
zlog_5424_apply_dst(cfg);
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
#include "log.h"
#include "northbound.h"
-static struct thread_master *master;
+static struct event_loop *master;
struct troute {
struct prefix_ipv4 prefix;
vty_terminate();
nb_terminate();
yang_terminate();
- thread_master_free(master);
+ event_master_free(master);
log_memstats(stderr, "test-nb-oper-data");
if (!isexit)
/* main routine. */
int main(int argc, char **argv)
{
- struct thread thread;
+ struct event thread;
unsigned int num_vrfs = 2;
unsigned int num_interfaces = 4;
unsigned int num_routes = 6;
umask(0027);
/* master init. */
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
zlog_aux_init("NONE: ", ZLOG_DISABLED);
vty_stdio(vty_do_exit);
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* Not reached. */
exit(0);
#include <zebra.h>
#include "lib/zlog.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/sigevent.h"
int main(int argc, char **argv)
{
int number = 10;
- struct thread_master *master;
+ struct event_loop *master;
zlog_aux_init("NONE: ", LOG_DEBUG);
assertf(number > 1, "(B) the number was %d", number);
/* set up SIGABRT handler */
- master = thread_master_create("test");
+ master = event_master_create("test");
signal_init(master, 0, NULL);
func_for_bt(number);
#include <lib_vty.h>
#include <buffer.h>
-struct thread_master *master;
+struct event_loop *master;
int main(int argc, char **argv)
{
#include "network.h"
#include "prng.h"
-struct thread_master *master;
+struct event_loop *master;
struct acc_vals {
int c0;
#include "libfrr.h"
#include "routing_nb.h"
#include "northbound_cli.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vrf.h"
#include "vty.h"
#include <grpcpp/security/credentials.h>
#include "grpc/frr-northbound.grpc.pb.h"
-DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm));
+DEFINE_HOOK(frr_late_init, (struct event_loop * tm), (tm));
DEFINE_KOOH(frr_fini, (), ());
struct vty *vty;
bool mpls_enabled;
-struct thread_master *master;
+struct event_loop *master;
struct zebra_privs_t static_privs = {0};
struct frrmod_runtime *grpc_module;
char binpath[2 * MAXPATHLEN + 1];
&frr_staticd_info, &frr_vrf_info,
};
-static void grpc_thread_stop(struct thread *thread);
+static void grpc_thread_stop(struct event *thread);
static void _err_print(const void *cookie, const char *errstr)
{
static_debug_init();
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
nb_init(master, staticd_yang_modules, array_size(staticd_yang_modules),
false);
cmd_terminate();
nb_terminate();
yang_terminate();
- thread_master_free(master);
+ event_master_free(master);
master = NULL;
}
// Signal FRR event loop to stop
test_debug("client: pthread: adding event to stop us");
- thread_add_event(master, grpc_thread_stop, NULL, 0, NULL);
+ event_add_event(master, grpc_thread_stop, NULL, 0, NULL);
test_debug("client: pthread: DONE (returning)");
return NULL;
}
-static void grpc_thread_start(struct thread *thread)
+static void grpc_thread_start(struct event *thread)
{
struct frr_pthread_attr client = {
.start = grpc_client_test_start,
frr_pthread_wait_running(pth);
}
-static void grpc_thread_stop(struct thread *thread)
+static void grpc_thread_stop(struct event *thread)
{
std::cout << __func__ << ": frr_pthread_stop_all" << std::endl;
frr_pthread_stop_all();
static_startup();
- thread_add_event(master, grpc_thread_start, NULL, 0, NULL);
+ event_add_event(master, grpc_thread_start, NULL, 0, NULL);
/* Event Loop */
- struct thread thread;
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ struct event thread;
+ while (event_fetch(master, &thread))
+ event_call(&thread);
return 0;
}
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
#include <zebra.h>
#include <math.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
#include "tests.h"
-extern struct thread_master *master;
+extern struct event_loop *master;
enum { ITERS_FIRST = 0,
ITERS_ERR = 100,
printf("%s did %d, x = %g\n", str, i, x);
}
-static void clear_something(struct thread *thread)
+static void clear_something(struct event *thread)
{
- struct work_state *ws = THREAD_ARG(thread);
+ struct work_state *ws = EVENT_ARG(thread);
/* this could be like iterating through 150k of route_table
* or worse, iterating through a list of peers, to bgp_stop them with
while (ws->i < ITERS_MAX) {
slow_func(ws->vty, ws->str, ws->i);
ws->i++;
- if (thread_should_yield(thread)) {
- thread_add_timer_msec(master, clear_something, ws, 0,
- NULL);
+ if (event_should_yield(thread)) {
+ event_add_timer_msec(master, clear_something, ws, 0,
+ NULL);
return;
}
}
ws->vty = vty;
ws->i = ITERS_FIRST;
- thread_add_timer_msec(master, clear_something, ws, 0, NULL);
+ event_add_timer_msec(master, clear_something, ws, 0, NULL);
return CMD_SUCCESS;
}
*/
#include <zebra.h>
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "command.h"
#include "memory.h"
DEFINE_MTYPE_STATIC(TEST_HEAVYWQ, WQ_NODE, "heavy_wq_node");
DEFINE_MTYPE_STATIC(TEST_HEAVYWQ, WQ_NODE_STR, "heavy_wq_node->str");
-extern struct thread_master *master;
+extern struct event_loop *master;
static struct work_queue *heavy_wq;
struct heavy_wq_node {
* CVS
*/
-struct thread_master *master;
+struct event_loop *master;
#if 0 /* set to 1 to use system alloc directly */
#undef XMALLOC
#include "zebra/rib.h"
#include "prng.h"
-struct thread_master *master;
+struct event_loop *master;
static int verbose;
static void str_append(char **buf, const char *repr)
exit(status);
}
-struct thread_master *master;
+struct event_loop *master;
/* main routine. */
int main(int argc, char **argv)
{
#include "tests/lib/cli/common_cli.h"
-extern struct thread_master *master;
+extern struct event_loop *master;
static void resolver_result(struct resolver_query *resq, const char *errstr,
int numaddrs, union sockunion *addr)
struct frr_signal_t sigs[] = {};
-struct thread_master *master;
+struct event_loop *master;
void func1(int *arg);
void func3(void);
func2(6, buf);
}
-static void threadfunc(struct thread *thread)
+static void threadfunc(struct event *thread)
{
func3();
}
int main(void)
{
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
signal_init(master, array_size(sigs), sigs);
zlog_aux_init("NONE: ", LOG_DEBUG);
- thread_execute(master, threadfunc, 0, 0);
+ event_execute(master, threadfunc, 0, 0);
exit(0);
}
.handler = &sigusr2,
}};
-struct thread_master *master;
-struct thread t;
+struct event_loop *master;
+struct event t;
int main(void)
{
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
signal_init(master, array_size(sigs), sigs);
zlog_aux_init("NONE: ", LOG_DEBUG);
- while (thread_fetch(master, &t))
- thread_call(&t);
+ while (event_fetch(master, &t))
+ event_call(&t);
exit(0);
}
#define s6_addr32 __u6_addr.__u6_addr32
#endif /*s6_addr32*/
-struct thread_master *master;
+struct event_loop *master;
/* This structure is copied from lib/srcdest_table.c to which it is
* private as far as other parts of Quagga are concerned.
static void test_state_free(struct test_state *test)
{
route_table_finish(test->table);
- hash_clean(test->log, log_free);
- hash_free(test->log);
+ hash_clean_and_free(&test->log, log_free);
XFREE(MTYPE_TMP, test);
}
#include <zebra.h>
#include <stream.h>
-#include <thread.h>
+#include "frrevent.h"
#include "printfrr.h"
static unsigned long long ham = 0xdeadbeefdeadbeef;
-struct thread_master *master;
+struct event_loop *master;
static void print_stream(struct stream *s)
{
char *prefix_str;
} test_node_t;
-struct thread_master *master;
+struct event_loop *master;
/*
* add_node
#include "memory.h"
#include "prng.h"
-#include "thread.h"
+#include "frrevent.h"
#define SCHEDULE_TIMERS 800
#define REMOVE_TIMERS 200
#define TIMESTR_LEN strlen("4294967296.999999")
-struct thread_master *master;
+struct event_loop *master;
static size_t log_buf_len;
static size_t log_buf_pos;
static struct prng *prng;
-static struct thread **timers;
+static struct event **timers;
static int timers_pending;
exit_code = 0;
}
- thread_master_free(master);
+ event_master_free(master);
XFREE(MTYPE_TMP, log_buf);
XFREE(MTYPE_TMP, expected_buf);
prng_free(prng);
exit(exit_code);
}
-static void timer_func(struct thread *thread)
+static void timer_func(struct event *thread)
{
int rv;
int main(int argc, char **argv)
{
int i, j;
- struct thread t;
+ struct event t;
struct timeval **alarms;
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
log_buf_len = SCHEDULE_TIMERS * (TIMESTR_LEN + 1) + 1;
log_buf_pos = 0;
/* Schedule timers to expire in 0..5 seconds */
interval_msec = prng_rand(prng) % 5000;
arg = XMALLOC(MTYPE_TMP, TIMESTR_LEN + 1);
- thread_add_timer_msec(master, timer_func, arg, interval_msec,
- &timers[i]);
+ event_add_timer_msec(master, timer_func, arg, interval_msec,
+ &timers[i]);
ret = snprintf(arg, TIMESTR_LEN + 1, "%lld.%06lld",
(long long)timers[i]->u.sands.tv_sec,
(long long)timers[i]->u.sands.tv_usec);
continue;
XFREE(MTYPE_TMP, timers[index]->arg);
- thread_cancel(&timers[index]);
+ event_cancel(&timers[index]);
timers_pending--;
}
}
XFREE(MTYPE_TMP, alarms);
- while (thread_fetch(master, &t))
- thread_call(&t);
+ while (event_fetch(master, &t))
+ event_call(&t);
return 0;
}
#include <stdio.h>
#include <unistd.h>
-#include "thread.h"
+#include "frrevent.h"
#include "prng.h"
#define SCHEDULE_TIMERS 1000000
#define REMOVE_TIMERS 500000
-struct thread_master *master;
+struct event_loop *master;
-static void dummy_func(struct thread *thread)
+static void dummy_func(struct event *thread)
{
}
{
struct prng *prng;
int i;
- struct thread **timers;
+ struct event **timers;
struct timeval tv_start, tv_lap, tv_stop;
unsigned long t_schedule, t_remove;
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
prng = prng_new(0);
timers = calloc(SCHEDULE_TIMERS, sizeof(*timers));
/* create thread structures so they won't be allocated during the
* time measurement */
for (i = 0; i < SCHEDULE_TIMERS; i++) {
- thread_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
+ event_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
}
for (i = 0; i < SCHEDULE_TIMERS; i++)
- thread_cancel(&timers[i]);
+ event_cancel(&timers[i]);
monotime(&tv_start);
long interval_msec;
interval_msec = prng_rand(prng) % (100 * SCHEDULE_TIMERS);
- thread_add_timer_msec(master, dummy_func, NULL, interval_msec,
- &timers[i]);
+ event_add_timer_msec(master, dummy_func, NULL, interval_msec,
+ &timers[i]);
}
monotime(&tv_lap);
int index;
index = prng_rand(prng) % SCHEDULE_TIMERS;
- thread_cancel(&timers[index]);
+ event_cancel(&timers[index]);
}
monotime(&tv_stop);
fflush(stdout);
free(timers);
- thread_master_free(master);
+ event_master_free(master);
prng_free(prng);
return 0;
}
DEFINE_MTYPE_STATIC(LIB, TESTBUF, "zmq test buffer");
DEFINE_MTYPE_STATIC(LIB, ZMQMSG, "zmq message");
-static struct thread_master *master;
+static struct event_loop *master;
static void msg_buf_free(void *data, void *hint)
{
printf("server recv: %s\n", buf);
fflush(stdout);
- frrzmq_thread_add_write_msg(master, serverwritefn, NULL, msg_id,
- zmqsock, &cb);
+ frrzmq_event_add_write_msg(master, serverwritefn, NULL, msg_id, zmqsock,
+ &cb);
}
static void serverfn(void *arg, void *zmqsock)
frrzmq_thread_cancel(&cb, &cb->read);
frrzmq_thread_cancel(&cb, &cb->write);
- frrzmq_thread_add_read_part(master, serverpartfn, NULL, NULL, zmqsock,
- &cb);
+ frrzmq_event_add_read_part(master, serverpartfn, NULL, NULL, zmqsock,
+ &cb);
}
static void sigchld(void)
{
void *zmqsock;
char dummy = 0;
- struct thread t;
+ struct event t;
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
signal_init(master, array_size(sigs), sigs);
frrzmq_init();
exit(1);
}
- frrzmq_thread_add_read_msg(master, serverfn, NULL, NULL, zmqsock, &cb);
+ frrzmq_event_add_read_msg(master, serverfn, NULL, NULL, zmqsock, &cb);
write(syncfd, &dummy, sizeof(dummy));
- while (thread_fetch(master, &t))
- thread_call(&t);
+ while (event_fetch(master, &t))
+ event_call(&t);
zmq_close(zmqsock);
frrzmq_finish();
- thread_master_free(master);
+ event_master_free(master);
log_memstats_stderr("test");
}
#include "common.h"
-struct thread_master *master;
+struct event_loop *master;
struct zebra_privs_t ospfd_privs;
struct ospf_test_node *root, struct ospf *ospf);
/* Global variables. */
-extern struct thread_master *master;
+extern struct event_loop *master;
extern struct ospf_topology topo1;
extern struct ospf_topology topo2;
extern struct ospf_topology topo3;
#include <zebra.h>
#include "getopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include <lib/version.h>
#include "vty.h"
#include "command.h"
cmd_terminate();
vty_terminate();
- thread_master_free(master);
+ event_master_free(master);
if (!isexit)
exit(0);
{
char *p;
char *progname;
- struct thread thread;
+ struct event thread;
bool debug = false;
/* Set umask before anything for security */
}
/* master init. */
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
/* Library inits. */
cmd_init(1);
vty_stdio(vty_do_exit);
/* Fetch next active thread. */
- while (thread_fetch(master, &thread))
- thread_call(&thread);
+ while (event_fetch(master, &thread))
+ event_call(&thread);
/* Not reached. */
exit(0);
log file ospf6d.log
!
-debug ospf6 lsa unknown
-debug ospf6 zebra
-debug ospf6 interface
-debug ospf6 neighbor
+!debug ospf6 lsa unknown
+!debug ospf6 zebra
+!debug ospf6 interface
+!debug ospf6 neighbor
!
interface r1-eth4
!
#
# Main router
for i in range(1, 2):
+ net["r%s" % i].loadConf("mgmtd", "%s/r%s/zebra.conf" % (thisDir, i))
net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i))
net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i))
router_list = tgen.routers()
for rname, router in router_list.items():
+
daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
-debug bfd network
-debug bfd peer
-debug bfd zebra
+!debug bfd network
+!debug bfd peer
+!debug bfd zebra
!
bfd
profile slow-tx
-debug bfd network
-debug bfd peer
-debug bfd zebra
+!debug bfd network
+!debug bfd peer
+!debug bfd zebra
!
bfd
profile slow-tx
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65010
no bgp ebgp-requires-policy
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65020
no bgp ebgp-requires-policy
!
-debug bgp updates
-debug bgp vpn leak-from-vrf
-debug bgp vpn leak-to-vrf
-debug bgp nht
+!debug bgp updates
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp nht
!
router bgp 65001
bgp router-id 10.10.10.10
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65001
bgp router-id 10.10.10.101
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65002
no bgp ebgp-requires-policy
-debug bgp neighbor-events
-debug bgp nht
-debug bgp updates in
-debug bgp updates out
+!debug bgp neighbor-events
+!debug bgp nht
+!debug bgp updates in
+!debug bgp updates out
!
router bgp 100
no bgp ebgp-requires-policy
-debug bgp neighbor-events
-debug bgp nht
-debug bgp updates in
-debug bgp updates out
+!debug bgp neighbor-events
+!debug bgp nht
+!debug bgp updates in
+!debug bgp updates out
!
router bgp 200
no bgp ebgp-requires-policy
-debug bgp neighbor-events
-debug bgp nht
-debug bgp updates in
-debug bgp updates out
+!debug bgp neighbor-events
+!debug bgp nht
+!debug bgp updates in
+!debug bgp updates out
!
router bgp 300
no bgp ebgp-requires-policy
-debug bgp neighbor-events
-debug bgp nht
-debug bgp updates in
-debug bgp updates out
+!debug bgp neighbor-events
+!debug bgp nht
+!debug bgp updates in
+!debug bgp updates out
!
router bgp 400
no bgp ebgp-requires-policy
!
-debug bgp neighbor
+!debug bgp neighbor
!
router bgp 65001
no bgp ebgp-requires-policy
host1 = tgen.gears["host1"]
pe1 = tgen.gears["PE1"]
pe2 = tgen.gears["PE2"]
- pe2.vtysh_cmd("debug zebra vxlan")
- pe2.vtysh_cmd("debug zebra kernel")
+ #pe2.vtysh_cmd("debug zebra vxlan")
+ #pe2.vtysh_cmd("debug zebra kernel")
# lets populate that arp cache
host1.run("ping -c1 10.10.1.1")
ip_learn_test(tgen, host1, pe1, pe2, "10.10.1.55")
host2 = tgen.gears["host2"]
pe1 = tgen.gears["PE1"]
pe2 = tgen.gears["PE2"]
- pe1.vtysh_cmd("debug zebra vxlan")
- pe1.vtysh_cmd("debug zebra kernel")
+ #pe1.vtysh_cmd("debug zebra vxlan")
+ #pe1.vtysh_cmd("debug zebra kernel")
# lets populate that arp cache
host2.run("ping -c1 10.10.1.3")
ip_learn_test(tgen, host2, pe2, pe1, "10.10.1.56")
host1 = tgen.gears["host1"]
pe1 = tgen.gears["PE1"]
pe2 = tgen.gears["PE2"]
- pe2.vtysh_cmd("debug zebra vxlan")
- pe2.vtysh_cmd("debug zebra kernel")
+ #pe2.vtysh_cmd("debug zebra vxlan")
+ #pe2.vtysh_cmd("debug zebra kernel")
# lets populate that arp cache
host1.run("ping -c1 10.10.1.1")
ip_learn_test(tgen, host1, pe1, pe2, "10.10.1.55")
host2 = tgen.gears["host2"]
pe1 = tgen.gears["PE1"]
pe2 = tgen.gears["PE2"]
- pe1.vtysh_cmd("debug zebra vxlan")
- pe1.vtysh_cmd("debug zebra kernel")
+ #pe1.vtysh_cmd("debug zebra vxlan")
+ #pe1.vtysh_cmd("debug zebra kernel")
# lets populate that arp cache
host2.run("ping -c1 10.10.1.3")
ip_learn_test(tgen, host2, pe2, pe1, "10.10.1.56")
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65002
no bgp ebgp-requires-policy
--- /dev/null
+!
+!debug bgp updates
+!debug bgp neighbor
+!
+bgp route-map delay-timer 5
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as external
+ address-family ipv4 unicast
+ network 10.10.10.1/32
+ network 10.10.10.2/32
+ network 10.10.10.3/32
+ aggregate-address 10.10.10.0/24 summary-only
+ neighbor 192.168.1.2 unsuppress-map r2
+ exit-address-family
+!
+ip prefix-list r1 seq 5 permit 10.10.10.1/32
+ip prefix-list r1 seq 10 permit 10.10.10.2/32
+!
+route-map r2 permit 10
+ match ip address prefix-list r1
+exit
--- /dev/null
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
--- /dev/null
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+!
--- /dev/null
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Check that `bgp route-map delay-timer` defers route-map driven updates:
+after removing a prefix-list entry while a 600-second delay timer is set,
+10.10.10.2/32 must not be withdrawn immediately.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = pytest.mark.bgpd
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_route_map_delay_timer():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ def _bgp_converge_1():
+ output = json.loads(
+ r1.vtysh_cmd(
+ "show bgp ipv4 unicast neighbor 192.168.1.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.0/24": {},
+ "10.10.10.1/32": {},
+ "10.10.10.2/32": {},
+ "10.10.10.3/32": None,
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge_1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "10.10.10.3/32 should not be advertised to r2"
+
+ # Set route-map delay-timer to max value and remove 10.10.10.2/32.
+ # After this, r1 MUST NOT announce updates immediately; it must wait
+ # 600 seconds before withdrawing 10.10.10.2/32.
+ r2.vtysh_cmd(
+ """
+ configure terminal
+ bgp route-map delay-timer 600
+ no ip prefix-list r1 seq 10 permit 10.10.10.2/32
+ """
+ )
+
+ def _bgp_converge_2():
+ output = json.loads(
+ r1.vtysh_cmd(
+ "show bgp ipv4 unicast neighbor 192.168.1.2 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "10.10.10.0/24": {},
+ "10.10.10.1/32": {},
+ "10.10.10.2/32": None,
+ "10.10.10.3/32": None,
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ # We deliberately pass `not None` (i.e. True) as the expected value so that
+ # run_and_expect retries for the full count*wait interval; failing to match
+ # the "withdrawn" expectation is the desired outcome here - it means
+ # 10.10.10.2/32 was NOT withdrawn immediately.
+ test_func = functools.partial(_bgp_converge_2)
+ _, result = topotest.run_and_expect(test_func, not None, count=60, wait=0.5)
+ assert (
+ result is not None
+ ), "10.10.10.2/32 advertised, but should not be advertised to r2"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
!
-debug bgp updates
-debug bgp vpn leak-from-vrf
-debug bgp vpn leak-to-vrf
-debug bgp nht
-debug route-map
+!debug bgp updates
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp nht
+!debug route-map
!
router bgp 65001
bgp router-id 10.10.10.10
!
-debug bgp updates
+!debug bgp updates
!
router bgp 65002
no bgp ebgp-requires-policy
log monitor notifications
log commands
!
-debug zebra packet
-debug zebra dplane
-debug zebra kernel
+!debug zebra packet
+!debug zebra dplane
+!debug zebra kernel
!
interface eth0
ipv6 address 2001::1/64
log monitor notifications
log commands
!
-debug zebra packet
-debug zebra dplane
-debug zebra kernel
+!debug zebra packet
+!debug zebra dplane
+!debug zebra kernel
!
interface eth0
ipv6 address 2001::2/64
log monitor notifications
log commands
!
-debug zebra packet
-debug zebra dplane
-debug zebra kernel
+!debug zebra packet
+!debug zebra dplane
+!debug zebra kernel
!
interface eth0
ipv6 address 2001::1/64
frr defaults traditional
!
-bgp send-extra-data zebra
+!bgp send-extra-data zebra
!
hostname r2
password zebra
log monitor notifications
log commands
!
-debug zebra packet
-debug zebra dplane
-debug zebra kernel
+!debug zebra packet
+!debug zebra dplane
+!debug zebra kernel
!
interface eth0
ipv6 address 2001::2/64
!
ip route 192.168.1.1/32 10.0.0.10
!
-debug bgp bestpath
-debug bgp nht
-debug bgp updates
-debug bgp update-groups
-debug bgp zebra
-debug zebra rib detail
+!debug bgp bestpath
+!debug bgp nht
+!debug bgp updates
+!debug bgp update-groups
+!debug bgp zebra
+!debug zebra rib detail
!
router bgp 2
address-family ipv4 uni
-debug bgp updates
-debug bgp bestpath 40.0.0.0/8
-debug bgp zebra
+!debug bgp updates
+!debug bgp bestpath 40.0.0.0/8
+!debug bgp zebra
!
router bgp 2
no bgp ebgp-requires-policy
neighbor 10.0.0.1 remote-as 1
neighbor 10.0.0.10 remote-as 3
address-family ipv4 uni
- network 60.0.0.0/24
\ No newline at end of file
+ network 60.0.0.0/24
assertmsg = '"r2" 192.168.1.1/32 route should be gone'
assert result is None, assertmsg
+def test_local_vs_non_local():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+
+ output = json.loads(r2.vtysh_cmd("show bgp ipv4 uni 60.0.0.0/24 json"))
+ paths = output["paths"]
+ for i in range(len(paths)):
+ if "fibPending" in paths[i]:
+ assert(False), "Route 60.0.0.0/24 should not have fibPending"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
--- /dev/null
+[exabgp.api]
+encoder = text
+highres = false
+respawn = false
+socket = ''
+
+[exabgp.bgp]
+openwait = 60
+
+[exabgp.cache]
+attributes = true
+nexthops = true
+
+[exabgp.daemon]
+daemonize = true
+pid = '/var/run/exabgp/exabgp.pid'
+user = 'exabgp'
+##daemonize = false
+
+[exabgp.log]
+all = false
+configuration = true
+daemon = true
+destination = '/var/log/exabgp.log'
+enable = true
+level = INFO
+message = false
+network = true
+packets = false
+parser = false
+processes = true
+reactor = true
+rib = false
+routes = false
+short = false
+timers = false
+
+[exabgp.pdb]
+enable = false
+
+[exabgp.profile]
+enable = false
+file = ''
+
+[exabgp.reactor]
+speed = 1.0
+
+[exabgp.tcp]
+acl = false
+bind = ''
+delay = 0
+once = false
+port = 179
--- /dev/null
+neighbor 10.0.0.1 {
+ router-id 10.0.0.2;
+ local-address 10.0.0.2;
+ local-as 65001;
+ peer-as 65534;
+ md5 test123;
+
+ static {
+ route 192.168.100.1/32 {
+ next-hop 10.0.0.2;
+ }
+ }
+}
--- /dev/null
+!
+!debug bgp neighbor
+!
+router bgp 65534 vrf public
+ bgp router-id 10.0.0.1
+ no bgp ebgp-requires-policy
+ neighbor 10.0.0.2 remote-as external
+ neighbor 10.0.0.2 timers 3 10
+ neighbor 10.0.0.2 timers connect 1
+ neighbor 10.0.0.2 password test123
+!
--- /dev/null
+!
+interface r1-eth0 vrf public
+ ip address 10.0.0.1/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+
+"""
+Test if BGP MD5 basic authentication works per-VRF.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(peer1)
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ r1 = tgen.gears["r1"]
+ r1.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf"))
+ r1.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf"))
+ r1.start()
+
+ peer = tgen.gears["peer1"]
+ peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env"))
+
+ # VRF 'public'
+ r1.cmd_raises("ip link add public type vrf table 1001")
+ r1.cmd_raises("ip link set up dev public")
+ r1.cmd_raises("ip link set r1-eth0 master public")
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_vrf_md5_peering():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_converge():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd("show ip bgp vrf public neighbor 10.0.0.2 json")
+ )
+ expected = {
+ "10.0.0.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 1}},
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+
+ assert result is None, "Can't peer with md5 per-VRF"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import os
import sys
import pytest
-
+from lib import topotest
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
router.logdir, rname, "{}-routes-{}.conf".format(iptype.lower(), optype)
)
with open(config_file, "w") as f:
- for i, net in enumerate(get_ip_networks(super_prefix, base_count, count)):
+ for i, net in enumerate(
+ get_ip_networks(super_prefix, base_count, count)
+ ):
if i in bad_indices:
if add:
f.write("ip route {} {} bad_input\n".format(net, via))
return tot_delta
-
# Number of static routes
router = tgen.gears["r1"]
output = router.run("vtysh -h | grep address-sanitizer")
[u"2100:1111:2220::/44", u"2100:3333:4440::/44"],
]
+    # This is apparently needed to allow for various mgmtd/staticd/zebra connections to form,
+    # which then SLOWS execution down. If we don't include this value then the
+    # initial, baseline-establishing, time is 2 times faster (e.g., 5s instead of 10s),
+ # but all later runs are slower and fail.
+ #
+ # This should be done differently based on actual facts.
+ topotest.sleep(5)
+
bad_indices = []
for ipv6 in [False, True]:
base_delta = do_config(
- prefix_count, prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ 0,
+ 0,
+ True,
+ ipv6,
+ prefix_base[ipv6][0],
)
# Another set of same number of prefixes
do_config(
- prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][1]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ base_delta,
+ 3,
+ True,
+ ipv6,
+ prefix_base[ipv6][1],
)
# Duplicate config
do_config(
- prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][0]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ base_delta,
+ 3,
+ True,
+ ipv6,
+ prefix_base[ipv6][0],
)
# Remove 1/2 of duplicate
# Add all back in so 1/2 replicate 1/2 new
do_config(
- prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][0]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ base_delta,
+ 3,
+ True,
+ ipv6,
+ prefix_base[ipv6][0],
)
# remove all
delta = do_config(
- prefix_count, prefix_count, bad_indices, base_delta, 3, False, ipv6, prefix_base[ipv6][0]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ base_delta,
+ 3,
+ False,
+ ipv6,
+ prefix_base[ipv6][0],
)
delta += do_config(
- prefix_count, prefix_count, bad_indices, base_delta, 3, False, ipv6, prefix_base[ipv6][1]
+ prefix_count,
+ prefix_count,
+ bad_indices,
+ base_delta,
+ 3,
+ False,
+ ipv6,
+ prefix_base[ipv6][1],
)
hostname r5
log file ldpd.log
!
-debug mpls ldp zebra
-debug mpls ldp event
-debug mpls ldp errors
-debug mpls ldp sync
+!debug mpls ldp zebra
+!debug mpls ldp event
+!debug mpls ldp errors
+!debug mpls ldp sync
!
mpls ldp
router-id 3.3.3.3
"debug zebra vxlan",
"debug zebra nht",
],
+ "mgmt": [],
"ospf": [
"debug ospf event",
"debug ospf ism",
result = rnode.check_router_running()
if result != "":
daemons = []
+ if "mgmtd" in result:
+ daemons.append("mgmtd")
if "bgpd" in result:
daemons.append("bgpd")
if "zebra" in result:
feature.add("ospf6")
break
+ # Loading empty mgmtd.conf file to router, to start the mgmtd daemon
+ router.load_config(
+ TopoRouter.RD_MGMTD, "{}/{}/mgmtd.conf".format(tgen.logdir, rname)
+ )
+
# Loading empty zebra.conf file to router, to start the zebra deamon
router.load_config(
TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
nexthop = set_data.setdefault("nexthop", None)
origin = set_data.setdefault("origin", None)
ext_comm_list = set_data.setdefault("extcommunity", {})
- metrictype = set_data.setdefault("metric-type", {})
+ metrictype = set_data.setdefault("metric-type", None)
# Local Preference
if local_preference:
memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get(
self.CONFIG_SECTION, "memleak_path"
)
- if memleak_file == "" or memleak_file == None:
+ if memleak_file == "" or memleak_file is None:
return False
return True
RD_PATH = 17
RD_SNMP = 18
RD_PIM6 = 19
+ RD_MGMTD = 20
RD = {
RD_FRR: "frr",
RD_ZEBRA: "zebra",
RD_PBRD: "pbrd",
RD_PATH: "pathd",
RD_SNMP: "snmpd",
+ RD_MGMTD: "mgmtd",
}
def __init__(self, tgen, cls, name, **params):
TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
TopoRouter.RD_PIM, TopoRouter.RD_PIM6, TopoRouter.RD_PBR,
- TopoRouter.RD_SNMP.
+ TopoRouter.RD_SNMP, TopoRouter.RD_MGMTD.
Possible `source` values are `None` for an empty config file, a path name which is
used directly, or a file name with no path components which is first looked for
memleak_file = (
os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.params["memleak_path"]
)
- if memleak_file == "" or memleak_file == None:
+ if memleak_file == "" or memleak_file is None:
return
self.stop()
self.run("chown -R exabgp:exabgp /etc/exabgp")
output = self.run(exacmd + " -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
- if output == None or len(output) == 0:
+ if output is None or len(output) == 0:
output = "<none>"
logger.info("{} exabgp started, output={}".format(self.name, output))
"pim6d",
"ldpd",
"pbrd",
+ "mgmtd",
]:
path = os.path.join(frrdir, fname)
if not os.path.isfile(path):
logger.error("could not find {} in {}".format(fname, frrdir))
ret = False
else:
- if fname != "zebra":
+            if fname != "zebra" and fname != "mgmtd":
continue
+ os.system("{} -v 2>&1 >{}/frr_mgmtd.txt".format(path, rundir))
os.system("{} -v 2>&1 >{}/frr_zebra.txt".format(path, rundir))
# Test MPLS availability
pass
return False
+
def iproute2_is_fdb_get_capable():
"""
Checks if the iproute2 version installed on the system is capable of
pass
return False
+
def module_present_linux(module, load):
"""
Returns whether `module` is present.
"pbrd": 0,
"pathd": 0,
"snmpd": 0,
+ "mgmtd": 0,
}
self.daemons_options = {"zebra": ""}
self.reportCores = True
if not os.path.isfile(zebra_path):
raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
+ mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+ if not os.path.isfile(mgmtd_path):
+ raise Exception("FRR MGMTD binary doesn't exist at {}".format(mgmtd_path))
+
# pylint: disable=W0221
# Some params are only meaningful for the parent class.
def config(self, **params):
zpath = os.path.join(self.daemondir, "zebra")
if not os.path.isfile(zpath):
raise Exception("No zebra binary found in {}".format(zpath))
+
+ cpath = os.path.join(self.daemondir, "mgmtd")
+        if not os.path.isfile(cpath):
+ raise Exception("No MGMTD binary found in {}".format(cpath))
# Allow user to specify routertype when the path was specified.
if params.get("routertype") is not None:
self.routertype = params.get("routertype")
self.cmd_raises("rm -f " + conf_file)
self.cmd_raises("touch " + conf_file)
else:
+ # copy zebra.conf to mgmtd folder, which can be used during startup
+ if daemon == "zebra":
+ conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
+ self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
self.cmd_raises("cp {} {}".format(source, conf_file))
if not self.unified_config or daemon == "frr":
self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
+ if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
+ # Add mgmtd with zebra - if it exists
+ try:
+ mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+ except:
+ pdb.set_trace()
+ if os.path.isfile(mgmtd_path):
+ self.daemons["mgmtd"] = 1
+ self.daemons_options["mgmtd"] = ""
+ # Auto-Started mgmtd has no config, so it will read from zebra config
+
if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
try:
self.daemons["staticd"] = 1
self.daemons_options["staticd"] = ""
# Auto-Started staticd has no config, so it will read from zebra config
+
else:
logger.info("No daemon {} known".format(daemon))
# print "Daemons after:", self.daemons
else:
logger.info("%s: %s %s started", self, self.routertype, daemon)
- # Start Zebra first
+ # Start mgmtd first
+ if "mgmtd" in daemons_list:
+ start_daemon("mgmtd")
+ while "mgmtd" in daemons_list:
+ daemons_list.remove("mgmtd")
+
+ # Start Zebra after mgmtd
if "zebra" in daemons_list:
start_daemon("zebra", "-s 90000000")
while "zebra" in daemons_list:
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+
+1. Verify mgmt commit check.
+2. Verify mgmt commit apply.
+3. Verify mgmt commit abort.
+4. Verify mgmt delete config.
+5. Kill mgmtd - verify that static routes are intact.
+6. Kill mgmtd - verify that watch frr restarts.
+7. Show and CLI - Execute all the newly introduced commands of mgmtd.
+8. Verify mgmt rollback functionality.
+
+"""
+import sys
+import time
+import os
+import pytest
+import platform
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topotest import version_cmp
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ step,
+ shutdown_bringup_interface,
+ stop_router,
+ start_router,
+ apply_raw_config,
+ kill_router_daemons,
+ start_router_daemons,
+)
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Global variables
+ADDR_TYPES = check_address_types()
+NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]}
+NETWORK2 = {"ipv4": "11.0.20.1/32", "ipv6": "2::1/128"}
+PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"}
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/yang_mgmt.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ if version_cmp(platform.release(), "4.19") < 0:
+ error_msg = (
+ 'These tests will not run. (have kernel "{}", '
+ "requires kernel >= 4.19)".format(platform.release())
+ )
+ pytest.skip(error_msg)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology: %s", mod)
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def populate_nh():
+ """
+ Populate nexthops.
+ """
+
+ next_hop_ip = {
+ "nh1": {
+ "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0],
+ "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0],
+ },
+ "nh2": {
+ "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0],
+ "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0],
+ },
+ }
+ return next_hop_ip
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_mgmt_commit_check(request):
+ """
+ Verify mgmt commit check.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ step("Mgmt Commit check")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt commit check",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Mgmt Commit check")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt commit check",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is not configured, as commit apply not done.")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+                    "network": "192.1.1.2/32",
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_mgmt_commit_apply(request):
+ """
+ Verify mgmt commit apply.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ step("Mgmt Commit apply with Valid Configuration")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Mgmt Commit apply with Invalid Configuration")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is configured")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.1.20/32"}]}}
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_mgmt_commit_abort(request):
+ """
+ Verify mgmt commit abort.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ step("Mgmt Commit abort")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt commit abort",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is not configured")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "192.1.1.3/32",
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_mgmt_delete_config(request):
+ """
+ Verify mgmt delete config.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+
+ step("Mgmt - Configure a static route using commit apply")
+
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that the route is added to RIB")
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "192.168.1.3/32",
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Mgmt delete config")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify that the route is deleted from RIB")
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is still present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_mgmt_chaos_stop_start_frr(request):
+ """
+ Kill mgmtd - verify that watch frr restarts.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+ next_hop_ip = populate_nh()
+
+ step("Configure Static route with next hop null 0")
+
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is configured and present in the zebra")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Restart frr")
+ stop_router(tgen, "r1")
+ start_router(tgen, "r1")
+ step("Verify routes are intact in zebra.")
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("delete the configured route and ")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is deleted and deleted from zebra")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {"r1": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_mgmt_chaos_kill_daemon(request):
+ """
+ Kill mgmtd - verify that static routes are intact
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ reset_config_on_routers(tgen)
+ next_hop_ip = populate_nh()
+
+ step("Configure Static route with next hop null 0")
+
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+ "mgmt commit apply",
+ ]
+ }
+ }
+
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("verify that the route is configured and present in the zebra")
+
+ dut = "r1"
+ protocol = "static"
+ input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Kill staticd daemon on R1.")
+    kill_router_daemons(tgen, "r1", ["staticd"])
+
+    step("Bring up staticd daemon on R1.")
+    start_router_daemons(tgen, "r1", ["staticd"])
+
+ step("Verify routes are intact in zebra.")
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Kill mgmtd daemon on R1.")
+    kill_router_daemons(tgen, "r1", ["mgmtd"])
+
+    step("Bring up mgmtd daemon on R1.")
+    start_router_daemons(tgen, "r1", ["mgmtd"])
+
+ step("Verify routes are intact in zebra.")
+ result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 30,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 4
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2": {
+ "keepalivetimer": 1,
+ "holddowntimer": 4
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1-link0": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 4
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "keepalivetimer": 1,
+ "holddowntimer": 4
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
router_list = tgen.routers()
for rname, router in router_list.items():
+
daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
if os.path.isfile(daemon_file):
router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
router_list = tgen.routers()
for rname, router in router_list.items():
+
daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
if os.path.isfile(daemon_file):
router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
-debug ospf6 lsa all
-debug ospf6 message all
-debug ospf6 route all
-debug ospf6 spf time
-debug ospf6 spf database
-debug ospf6 zebra send
-debug ospf6 zebra recv
-
-debug ospf6 lsa router
-debug ospf6 lsa router originate
-debug ospf6 lsa router examine
-debug ospf6 lsa router flooding
-debug ospf6 lsa as-external
-debug ospf6 lsa as-external originate
-debug ospf6 lsa as-external examine
-debug ospf6 lsa as-external flooding
-debug ospf6 lsa intra-prefix
-debug ospf6 lsa intra-prefix originate
-debug ospf6 lsa intra-prefix examine
-debug ospf6 lsa intra-prefix flooding
-debug ospf6 border-routers
-debug ospf6 zebra
-debug ospf6 interface
-debug ospf6 neighbor
-debug ospf6 flooding
-debug ospf6 gr helper
-debug ospf6 spf process
-debug ospf6 route intra-area
-debug ospf6 route inter-area
-debug ospf6 abr
-debug ospf6 asbr
-debug ospf6 nssa
+!debug ospf6 lsa all
+!debug ospf6 message all
+!debug ospf6 route all
+!debug ospf6 spf time
+!debug ospf6 spf database
+!debug ospf6 zebra send
+!debug ospf6 zebra recv
+!
+!debug ospf6 lsa router
+!debug ospf6 lsa router originate
+!debug ospf6 lsa router examine
+!debug ospf6 lsa router flooding
+!debug ospf6 lsa as-external
+!debug ospf6 lsa as-external originate
+!debug ospf6 lsa as-external examine
+!debug ospf6 lsa as-external flooding
+!debug ospf6 lsa intra-prefix
+!debug ospf6 lsa intra-prefix originate
+!debug ospf6 lsa intra-prefix examine
+!debug ospf6 lsa intra-prefix flooding
+!debug ospf6 border-routers
+!debug ospf6 zebra
+!debug ospf6 interface
+!debug ospf6 neighbor
+!debug ospf6 flooding
+!debug ospf6 gr helper
+!debug ospf6 spf process
+!debug ospf6 route intra-area
+!debug ospf6 route inter-area
+!debug ospf6 abr
+!debug ospf6 asbr
+!debug ospf6 nssa
!
interface r1-eth0
ipv6 ospf6 area 0
router_list = tgen.routers()
for rname, router in router_list.items():
+
daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
if os.path.isfile(daemon_file):
router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Change the summary address mask to lower match (ex - 16 to 8)")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step(
"Verify that external routes(static / connected) are summarised"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Change the summary address mask to higher match (ex - 8 to 24)")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step(
"Verify that external routes(static / connected) are summarised"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step(" Un configure one of the summary address.")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step(
"Configure route map and & rule to permit configured summary address,"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv4"][0]: {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Delete the configured summary")
ospf_summ_r1 = {
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Summary Route should not present in RIB"
+ "Error: Summary Route still present in RIB".format(tc_name)
)
step("show ip ospf summary should not have any summary address.")
}
dut = "r0"
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Summary Route should not present in OSPF DB"
+ "Error: Summary still present in DB".format(tc_name)
+ )
dut = "r1"
step("All 5 routes are advertised after deletion of configured summary.")
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("configure the summary again and delete static routes .")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
input_dict = {
"r0": {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Add back static routes.")
input_dict_static_rtes = {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_static_rtes, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
dut = "r1"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show configure summaries.")
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Configure new static route which is matching configured summary.")
input_dict_static_rtes = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Shut one of the interface")
intf = topo["routers"]["r0"]["links"]["r3-link0"]["interface"]
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Summary Routes should not be present in RIB. \n"
+ "Error: Summary Route still present in RIB".format(tc_name)
)
step("show ip ospf summary should not have any summary address.")
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary still present in DB".format(tc_name)
step("Configure Min tag value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Configure Max Tag Value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("configure new static route with different tag.")
input_dict_static_rtes_11 = {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, tag="88888", expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tag="88888",
expected=False,
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary address")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that summary address is flushed from neighbor.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Configure summary first & then configure matching static route.")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB "
+ "Error: Summary Route still present in RIB".format(tc_name)
)
step("show ip ospf summary should not have any summary address.")
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary still present in DB".format(tc_name)
step("Configure Min tag value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Configure Max Tag Value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("configure new static route with different tag.")
input_dict_static_rtes_11 = {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, tag="88888", expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tag="88888",
expected=False,
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB.\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary address")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that summary address is flushed from neighbor.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB \n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Configure summary first & then configure matching static route.")
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB.\n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB."
+ "Error: Routes still present in RIB".format(tc_name)
+ )
- step("Verify that show ip ospf summary should show the " "configured summaries.")
+ step("Verify that show ip ospf summary should show the configured summaries.")
input_dict = {
SUMMARY["ipv4"][0]: {
"summaryAddress": SUMMARY["ipv4"][0],
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
- )
+ ), "Testcase {} : Failed. Error: Summary Route still present in RIB".format(tc_name)
step("show ip ospf summary should not have any summary address.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary still present in DB".format(tc_name)
step("Reconfigure summary with no advertise.")
ospf_summ_r1 = {
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB \n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
- step("Verify that show ip ospf summary should show the " "configured summaries.")
+ step("Verify that show ip ospf summary should show the configured summaries.")
input_dict = {
SUMMARY["ipv4"][0]: {
"summaryAddress": SUMMARY["ipv4"][0],
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step(
"Change summary address from no advertise to advertise "
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes is present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB"
+ "Error: Routes is present in RIB".format(tc_name)
+ )
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB.\n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB.\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step(
"configure route map and add rule to permit configured static "
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv4"][0]: {
step("Verify that advertised summary route is flushed from neighbor.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB\n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in RIB.\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Delete the configured route map.")
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv4"][0]: {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Reconfigure the route map with denying configure summary address.")
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Redistribute static/connected routes without route map.")
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv4"][0]: {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step(
"Configure rule to deny all the routes in route map and configure"
step("Verify that no summary route is originated.")
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB.\n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
routemaps = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Change route map rule for 1 of the routes to deny.")
# Create ip prefix list
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("add rule in route map to deny configured summary address.")
# Create ip prefix list
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB.\n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB.\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Reload the FRR router")
# stop/start -> restart FRR router and verify
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("Kill OSPFd daemon on R0.")
kill_router_daemons(tgen, "r0", ["ospfd"])
step("Verify OSPF neighbors are up after bringing back ospfd in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed \n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
step("restart zebrad")
kill_router_daemons(tgen, "r0", ["zebra"])
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
}
dut = "r1"
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
- assert (
- result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
- tc_name, result
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in OSPF RIB. \n Error: "
+ "Routes still present in OSPF RIB {}".format(tc_name, result)
)
result = verify_rib(
tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
)
- assert (
- result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ assert result is not True, (
+ "Testcase {} : Failed\n Expected: Routes should not be present in RIB.\n"
+ "Error: Routes still present in RIB".format(tc_name)
+ )
write_test_footer(tc_name)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r0 = {
"r0": {
"route is sent to R1."
)
- step(
- "Configure summary & redistribute static/connected route with " "metric type 2"
- )
+ step("Configure summary & redistribute static/connected route with metric type 2")
input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][3]}]}}
dut = "r1"
result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Learn type 7 lsa from neighbours")
result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r0 = {
"r0": {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
step("Verify that already originated summary is intact.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed. Error: Summary missing in OSPF DB".format(tc_name)
dut = "r1"
aggr_timer = {"r1": {"ospf": {"aggr_timer": 6}}}
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
step("Verify that the neighbour is not FULL between R1 and R2.")
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=10
)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
"show ip ospf neighbor cmd."
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=6
)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=10
)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
"show ip ospf neighbor cmd."
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=10
)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r0"
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
step("Verify OSPF neighbors are up after bringing back ospfd in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r1"
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is not True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors are up after bringing back ospfd in R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors are up after restarting R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors are up after restarting R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors after base config is done.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
step("Verify OSPF neighbors are up after bringing back ospfd in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify OSPF neighbors are up after bringing back ospfd in R1")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 8 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
step("Verify that OSPF is up with 8 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
step("Verify that OSPF is up with 2 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 8 neighborship sessions.")
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r0"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers."
- )
+ step("Configure DR priority 100 on R0 and clear ospf neighbors on all the routers.")
input_dict = {
"r0": {
result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Configure DR pririty 150 on R0 and clear ospf neighbors " "on all the routers."
- )
+ step("Configure DR priority 150 on R0 and clear ospf neighbors on all the routers.")
input_dict = {
"r0": {
result = verify_ospf_neighbor(tgen, topo, dut, lan=True, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r0: OSPF neighbors-hip is up \n Error: {}".format(
+ ), "Testcase {} : Failed \n r0: OSPF neighborship is up \n Error: {}".format(
tc_name, result
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
- step("Change area 1 as non nssa area (on the fly changing area" " type on DUT).")
+ step("Change area 1 as non nssa area (on the fly changing area type on DUT).")
for rtr in ["r1", "r2", "r3"]:
input_dict = {
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
redistribute_ospf(tgen, topo, "r0", "static", delete=True)
- step(
- "Create prefix-list in R0 to permit 10.0.20.1/32 prefix &" " deny 10.0.20.2/32"
- )
+ step("Create prefix-list in R0 to permit 10.0.20.1/32 prefix & deny 10.0.20.2/32")
# Create ip prefix list
pfx_list = {
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are present in fib \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are present in fib \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, retry_timeout=4, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: OSPF routes are present \n Error: {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed \n " "r1: routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n r1: routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_prefix_lists(tgen, pfx_list)
assert (
result is not True
- ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ ), "Testcase {} : Failed \n Prefix list not present. Error: {}".format(
tc_name, result
)
result = verify_prefix_lists(tgen, pfx_list)
assert (
result is not True
- ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ ), "Testcase {} : Failed \n Prefix list not present. Error: {}".format(
tc_name, result
)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF neighbors are FULL.")
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF neighbors are FULL.")
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF neighbours are reset and forms new adjacencies.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("Verify that OSPF neighbours are Full.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error {}".format(
ospf_covergence
)
# Api call verify whether BGP is converged
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
reset_config_on_routers(tgen)
ospf_covergence = verify_ospf_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
dut = "r1"
result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("modify dead interval from default value to r1" "dead interval timer on r2")
+ step("modify dead interval from default value to r1 dead interval timer on r2")
topo1 = {
"r0": {
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
- step("reconfigure the default dead interval timer value to " "default on r1 and r2")
+ step("reconfigure the default dead interval timer value to default on r1 and r2")
topo1 = {
"r0": {
"links": {
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error {}".format(
ospf_covergence
)
result = create_interfaces_cfg(tgen, topo1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Verify that timer value is deleted from intf & " "set to default value 40 sec."
- )
+ step("Verify that timer value is deleted from intf & set to default value 40 sec.")
input_dict = {"r1": {"links": {"r0": {"ospf": {"timerDeadSecs": 40}}}}}
dut = "r1"
result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict)
clear_ospf(tgen, "r0")
- step(
- "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
- )
+ step("Verify that OSPF neighborship between R0 and R1 is stuck in Exstart State.")
result = verify_ospf_neighbor(tgen, topo, expected=False)
assert result is not True, (
"Testcase {} : Failed \n OSPF nbrs are Full "
"instead of Exstart. Error: {}".format(tc_name, result)
)
- step(
- "Verify that configured MTU value is updated in the show ip " "ospf interface."
- )
+ step("Verify that configured MTU value is updated in the show ip ospf interface.")
dut = "r0"
input_dict = {"r0": {"links": {"r1": {"ospf": {"mtuBytes": 1200}}}}}
clear_ospf(tgen, "r0")
- step(
- "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
- )
+ step("Verify that OSPF neighborship between R0 and R1 is stuck in Exstart State.")
result = verify_ospf_neighbor(tgen, topo, expected=False)
assert result is not True, (
"Testcase {} : Failed \n OSPF nbrs are Full "
result = verify_ospf_neighbor(tgen, topo)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Configure ospf interface with jumbo MTU (9216)." "Reset ospf neighbors on R0."
- )
+ step("Configure ospf interface with jumbo MTU (9216). Reset ospf neighbors on R0.")
rtr0.run("ip link set {} mtu 9216".format(r0_r1_intf))
rtr1.run("ip link set {} mtu 9216".format(r1_r0_intf))
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
- step("Verify that GR helper route is disabled by default to the in" "the DUT.")
+ step("Verify that GR helper route is disabled by default to the in the DUT.")
input_dict = {
"helperSupport": "Disabled",
"strictLsaCheck": "Enabled",
result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Verify that DUT does not enter helper mode upon receiving the " "grace lsa.")
+ step("Verify that DUT does not enter helper mode upon receiving the grace lsa.")
# send grace lsa
scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed. DUT entered helper role " " \n Error: {}".format(
+ ), "Testcase {} : Failed. DUT entered helper role \n Error: {}".format(
tc_name, result
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Perform GR in RR.")
- step("Verify that DUT does enter helper mode upon receiving" " the grace lsa.")
+ step("Verify that DUT does enter helper mode upon receiving the grace lsa.")
input_dict = {"activeRestarterCnt": 1}
gracelsa_sent = False
repeat = 0
result = create_router_ospf(tgen, topo, ospf_gr_r0)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Verify that DUT does enter helper mode upon receiving" " the grace lsa.")
+ step("Verify that DUT does enter helper mode upon receiving the grace lsa.")
input_dict = {"activeRestarterCnt": 1}
gracelsa_sent = False
repeat = 0
result = create_router_ospf(tgen, topo, ospf_gr_r0)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Verify that GR helper router is disabled in the DUT for" " router id x.x.x.x")
+ step("Verify that GR helper router is disabled in the DUT for router id x.x.x.x")
input_dict = {"enabledRouterIds": [{"routerId": "1.1.1.1"}]}
dut = "r0"
result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
ospf_gr_r0 = {
"r0": {"ospf": {"graceful-restart": {"helper enable": [], "opaque": True}}}
}
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
- step(
- "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers."
- )
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
+ step("Configure DR priority 100 on R0 and clear ospf neighbors on all the routers.")
input_dict = {
"r0": {
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
- step(
- "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers."
- )
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
+ step("Configure DR priority 100 on R0 and clear ospf neighbors on all the routers.")
input_dict = {
"r0": {
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
ospf_gr_r0 = {
"r0": {"ospf": {"graceful-restart": {"helper enable": [], "opaque": True}}}
}
result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed. DUT entered helper role " " \n Error: {}".format(
+ ), "Testcase {} : Failed. DUT entered helper role \n Error: {}".format(
tc_name, result
)
ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
assert (
ospf_covergence is True
- ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ), "OSPF is not converged after reset config \n Error: {}".format(ospf_covergence)
ospf_gr_r0 = {
"r0": {"ospf": {"graceful-restart": {"helper enable": [], "opaque": True}}}
}
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdrawn from their peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
- )
+ ), "Testcase {} : Failed Error: Summary Route still present in RIB".format(tc_name)
step("show ip ospf summary should not have any summary address.")
input_dict = {
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary still present in DB".format(tc_name)
dut = "r1"
step("All 5 routes are advertised after deletion of configured summary.")
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("configure the summary again and delete static routes .")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
input_dict = {
"r0": {
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step("Add back static routes.")
input_dict_static_rtes = {
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
dut = "r1"
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show configure summaries.")
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Configure new static route which is matching configured summary.")
input_dict_static_rtes = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Shut one of the interface")
intf = topo["routers"]["r0"]["links"]["r3-link0"]["interface"]
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf6": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Change the summary address mask to lower match (ex - 16 to 8)")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step(
"Verify that external routes(static / connected) are summarised"
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Change the summary address mask to higher match (ex - 8 to 24)")
ospf_summ_r1 = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step(
"Verify that external routes(static / connected) are summarised"
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step(" Un configure one of the summary address.")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf6": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
- )
+ ), "Testcase {} : Failed Error: Summary Route still present in RIB".format(tc_name)
step("show ip ospf summary should not have any summary address.")
input_dict = {
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary still present in DB".format(tc_name)
step("Configure Min tag value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Configure Max Tag Value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("configure new static route with different tag.")
input_dict_static_rtes_11 = {
)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary address")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that summary address is flushed from neighbor.")
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step("Configure summary first & then configure matching static route.")
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
"ospf6": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
- )
+ ), "Testcase {} : Failed Error: Summary Route still present in RIB".format(tc_name)
step("show ip ospf summary should not have any summary address.")
input_dict = {
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary still present in DB".format(tc_name)
step("Configure Min tag value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries with tag.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Configure Max Tag Value")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("configure new static route with different tag.")
input_dict_static_rtes_11 = {
)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step(
"Verify that boundary values tags are used for summary route"
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary address")
ospf_summ_r1 = {
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that summary address is flushed from neighbor.")
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step("Configure summary first & then configure matching static route.")
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
- step("Verify that show ip ospf summary should show the " "configured summaries.")
+ step("Verify that show ip ospf summary should show the configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"summaryAddress": SUMMARY["ipv6"][0],
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
- tc_name
- )
+ ), "Testcase {} : Failed Error: Summary Route still present in RIB".format(tc_name)
step("show ip ospf summary should not have any summary address.")
input_dict = {
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary still present in DB".format(tc_name)
step("Reconfigure summary with no advertise.")
ospf_summ_r1 = {
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
- step("Verify that show ip ospf summary should show the " "configured summaries.")
+ step("Verify that show ip ospf summary should show the configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"summaryAddress": SUMMARY["ipv6"][0],
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step(
"Change summary address from no advertise to advertise "
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
output = tgen.gears["r0"].vtysh_cmd(
"show ipv6 ospf6 database as-external json", isjson=True
)
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes is present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is present in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step(
"Configure route map and & rule to permit configured summary address,"
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv6"][0]: {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Configure metric type as 1 in route map.")
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Un configure metric type from route map.")
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
step("Change rule from permit to deny in prefix list.")
pfx_list = {
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
write_test_footer(tc_name)
result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
- step(
- "Configure External Route summary in R0 to summarise 5" " routes to one route."
- )
+ step("Configure External Route summary in R0 to summarise 5 routes to one route.")
ospf_summ_r1 = {
"r0": {
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
step("Reload the FRR router")
# stop/start -> restart FRR router and verify
result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
assert (
result is True
- ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
assert (
result is True
- ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ ), "Testcase {} : Failed Error: Summary missing in OSPF DB".format(tc_name)
- step("Verify that originally advertised routes are withdraw from there" " peer.")
+ step("Verify that originally advertised routes are withdraw from there peer.")
input_dict = {
"r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ ), "Testcase {} : Failed \n Error: Routes still present in OSPF RIB {}".format(
tc_name, result
)
)
assert (
result is not True
- ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+ ), "Testcase {} : Failed Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
pytest.skip(tgen.errors)
ospf6_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf6_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=5
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
"show ip ospf6 neighbor cmd."
)
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=5
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
"show ip ospf6 neighbor cmd."
)
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=5
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
"show ip ospf6 neighbor cmd."
)
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=5
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
"show ip ospf6 neighbor cmd."
)
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, expected=False)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
ospf6_covergence = verify_ospf6_neighbor(
tgen, topo, dut=dut, expected=False, retry_timeout=3
)
- assert ospf6_covergence is not True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
dut = "r2"
ospf6_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf6_covergence is True, "Testcase {} :Failed \n Error:" " {}".format(
+ assert ospf6_covergence is True, "Testcase {} :Failed \n Error: {}".format(
tc_name, ospf6_covergence
)
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 8 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 8 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 2 neighborship sessions.")
dut = "r1"
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF is up with 8 neighborship sessions.")
ospf_covergence = verify_ospf6_neighbor(tgen, topo, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
dut = "r0"
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
dut = "r2"
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut, lan=True)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
result = verify_ospf6_neighbor(tgen, topo)
- assert result is True, "setup_module: Failed \n Error:" " {}".format(result)
+ assert result is True, "setup_module: Failed \n Error: {}".format(result)
logger.info("Running setup_module() done")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module :Failed \n Error: {}".format(
ospf_covergence
)
result = verify_ospf6_neighbor(tgen, topo, dut="r2", expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Nbrs are not down" "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Nbrs are not down Error: {}".format(tc_name, result)
step("Now configure area 0 on interface of r1 connecting to r2.")
result = verify_ospf6_neighbor(tgen, topo, dut="r2", expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Nbrs are not down" "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Nbrs are not down Error: {}".format(tc_name, result)
step("Now configure area 2 on interface of r1 connecting to r2.")
result = verify_ospf6_neighbor(tgen, topo)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Change area 1 as non nssa area (on the fly changing area" " type on DUT).")
+ step("Change area 1 as non nssa area (on the fly changing area type on DUT).")
for rtr in ["r1", "r2", "r3"]:
input_dict = {
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module: Failed \n Error: {}".format(
ospf_covergence
)
result = create_router_ospf(tgen, topo, ospf_red_r1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Create prefix-list in R0 to permit 10.0.20.1/32 prefix &" " deny 10.0.20.2/32"
- )
+ step("Create prefix-list in R0 to permit 10.0.20.1/32 prefix & deny 10.0.20.2/32")
# Create ip prefix list
pfx_list = {
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
result = verify_prefix_lists(tgen, pfx_list)
assert (
result is not True
- ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ ), "Testcase {} : Failed \n Prefix list not present. Error: {}".format(
tc_name, result
)
result = verify_prefix_lists(tgen, pfx_list)
assert (
result is not True
- ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format(
+ ), "Testcase {} : Failed \n Prefix list not present. Error: {}".format(
tc_name, result
)
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module: Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF neighbors are FULL.")
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("Verify that OSPF neighbours are reset and forms new adjacencies.")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
pytest.skip(tgen.errors)
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "setup_module: Failed \n Error: {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
step("verify that ospf neighbours are full")
ospf_covergence = verify_ospf6_neighbor(tgen, topo, dut=dut)
- assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "Testcase Failed \n Error: {}".format(
ospf_covergence
)
result = create_interfaces_cfg(tgen, topo1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Verify that timer value is deleted from intf & " "set to default value 40 sec."
- )
+ step("Verify that timer value is deleted from intf & set to default value 40 sec.")
input_dict = {"r1": {"links": {"r0": {"ospf6": {"timerIntervalsConfigHello": 10}}}}}
dut = "r1"
result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("modify dead interval from default value to r1" "dead interval timer on r2")
+ step("modify dead interval from default value to r1 dead interval timer on r2")
topo1 = {
"r0": {
# reconfiguring deleted ospf process by resetting the configs.
reset_config_on_routers(tgen)
- step("reconfigure the default dead interval timer value to " "default on r1 and r2")
+ step("reconfigure the default dead interval timer value to default on r1 and r2")
topo1 = {
"r0": {
"links": {
result = create_interfaces_cfg(tgen, topo1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Verify that timer value is deleted from intf & " "set to default value 40 sec."
- )
+ step("Verify that timer value is deleted from intf & set to default value 40 sec.")
input_dict = {"r1": {"links": {"r0": {"ospf6": {"timerIntervalsConfigDead": 40}}}}}
dut = "r1"
result = verify_ospf6_interface(tgen, topo, dut=dut, input_dict=input_dict)
clear_ospf(tgen, "r0", ospf="ospf6")
clear_ospf(tgen, "r1", ospf="ospf6")
- step(
- "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
- )
+ step("Verify that OSPF neighborship between R0 and R1 is stuck in Exstart State.")
result = verify_ospf6_neighbor(tgen, topo, expected=False)
assert result is not True, (
"Testcase {} : Failed \n OSPF nbrs are Full "
"instead of Exstart. Error: {}".format(tc_name, result)
)
- step(
- "Verify that configured MTU value is updated in the show ip " "ospf interface."
- )
+ step("Verify that configured MTU value is updated in the show ip ospf interface.")
dut = "r0"
input_dict = {"r0": {"links": {"r1": {"ospf6": {"interfaceMtu": 1400}}}}}
clear_ospf(tgen, "r0", ospf="ospf6")
- step(
- "Verify that OSPF neighborship between R0 and R1 is stuck in Exstart" " State."
- )
+ step("Verify that OSPF neighborship between R0 and R1 is stuck in Exstart State.")
result = verify_ospf6_neighbor(tgen, topo, expected=False)
assert result is not True, (
"Testcase {} : Failed \n OSPF nbrs are Full "
result = verify_ospf6_neighbor(tgen, topo)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step(
- "Configure ospf interface with jumbo MTU (9216)." "Reset ospf neighbors on R0."
- )
+ step("Configure ospf interface with jumbo MTU (9216). Reset ospf neighbors on R0.")
rtr0.run("ifconfig {} mtu 9216".format(r0_r1_intf))
rtr1.run("ifconfig {} mtu 9216".format(r1_r0_intf))
result = create_debug_log_config(tgen, input_dict)
- # Code coverage steps #Do Not upstream
- input_dict_config = {
- "r1": {
- "raw_config": [
- "end",
- "debug ospf6 event",
- "debug ospf6 gr helper",
- "debug ospf6 ism events",
- "debug ospf6 ism status",
- "debug ospf6 ism timers",
- "debug ospf6 nsm events",
- "debug ospf6 nsm status",
- "debug ospf6 nsm timers ",
- "debug ospf6 nssa",
- "debug ospf6 lsa aggregate",
- "debug ospf6 lsa flooding ",
- "debug ospf6 lsa generate",
- "debug ospf6 lsa install ",
- "debug ospf6 lsa refresh",
- "debug ospf6 packet all detail",
- "debug ospf6 packet all recv",
- "debug ospf6 packet all send",
- "debug ospf6 packet dd detail",
- "debug ospf6 packet dd recv",
- "debug ospf6 packet dd send ",
- "debug ospf6 packet hello detail",
- "debug ospf6 packet hello recv",
- "debug ospf6 packet hello send",
- "debug ospf6 packet ls-ack detail",
- "debug ospf6 packet ls-ack recv",
- "debug ospf6 packet ls-ack send",
- "debug ospf6 packet ls-request detail",
- "debug ospf6 packet ls-request recv",
- "debug ospf6 packet ls-request send",
- "debug ospf6 packet ls-update detail",
- "debug ospf6 packet ls-update recv",
- "debug ospf6 packet ls-update send",
- "debug ospf6 sr",
- "debug ospf6 te ",
- "debug ospf6 zebra interface",
- "debug ospf6 zebra redistribute",
- ]
- }
- }
-
- apply_raw_config(tgen, input_dict_config)
-
for rtr in topo["routers"]:
clear_ospf(tgen, rtr, ospf="ospf6")
clear_ospf(tgen, rtr, ospf="ospf6")
ospf_covergence = verify_ospf6_neighbor(tgen, topo1)
- assert ospf_covergence is True, "OSPF NBRs not up.Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "OSPF NBRs not up. Failed \n Error: {}".format(
ospf_covergence
)
assert result is True, "Testcase : Failed \n Error: {}".format(result)
ospf_covergence = verify_ospf6_neighbor(tgen, topo, expected=False)
- assert (
- ospf_covergence is not True
- ), "OSPF NBRs are up.Failed \n Error:" " {}".format(ospf_covergence)
+ assert ospf_covergence is not True, "OSPF NBRs are up. Failed \n Error: {}".format(
+ ospf_covergence
+ )
topo1 = {}
topo1 = deepcopy(topo)
topo1["routers"]["r3"]["ospf6"]["router_id"] = "1.1.1.4"
ospf_covergence = verify_ospf6_neighbor(tgen, topo1)
- assert ospf_covergence is True, "OSPF NBRs not up.Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "OSPF NBRs not up. Failed \n Error: {}".format(
ospf_covergence
)
reset_config_on_routers(tgen)
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, "OSPF NBRs not up.Failed \n Error:" " {}".format(
+ assert ospf_covergence is True, "OSPF NBRs not up. Failed \n Error: {}".format(
ospf_covergence
)
--- /dev/null
+log timestamp precision 3
--- /dev/null
+log timestamp precision 3
--- /dev/null
+log timestamp precision 3
+
+interface r1-eth0
+ ip address 101.0.0.1/24
+ ipv6 address 2101::1/64
+exit
+
+interface r1-eth1 vrf red
+ ip address 102.0.0.1/24
+ ipv6 address 2102::1/64
+exit
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+"""
+Test static route functionality
+"""
+
+import datetime
+import ipaddress
+import math
+import os
+import sys
+import re
+
+import pytest
+from lib.topogen import TopoRouter, Topogen, get_topogen
+from lib.topolog import logger
+from lib.common_config import retry, step
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ # Setup VRF red
+ router.net.add_l3vrf("red", 10)
+ router.net.add_loop("lo-red")
+ router.net.attach_iface_to_l3vrf("lo-red", "red")
+ router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+ # and select daemons to run
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ router.load_config(TopoRouter.RD_MGMTD)
+ router.load_config(TopoRouter.RD_STATIC)
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def get_ip_networks(super_prefix, count):
+ count_log2 = math.log(count, 2)
+ if count_log2 != int(count_log2):
+ count_log2 = int(count_log2) + 1
+ else:
+ count_log2 = int(count_log2)
+ network = ipaddress.ip_network(super_prefix)
+ return tuple(network.subnets(count_log2))[0:count]
+
+
+def enable_debug(router):
+ router.vtysh_cmd("debug northbound callbacks configuration")
+
+
+def disable_debug(router):
+ router.vtysh_cmd("no debug northbound callbacks configuration")
+
+
+@retry(retry_timeout=3, initial_wait=0.1)
+def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia):
+ network = ipaddress.ip_network(super_prefix)
+ vrfstr = f" vrf {vrf}" if vrf else ""
+ if network.version == 6:
+ kernel = r1.run(f"ip -6 route show{vrfstr}")
+ else:
+ kernel = r1.run(f"ip -4 route show{vrfstr}")
+
+ logger.debug("checking kernel routing table%s:\n%s", vrfstr, kernel)
+ for i, net in enumerate(get_ip_networks(super_prefix, count)):
+ if not add:
+ assert str(net) not in kernel
+ continue
+
+ if is_blackhole:
+ route = f"blackhole {str(net)} proto (static|196) metric 20"
+ else:
+ route = (
+ f"{str(net)}(?: nhid [0-9]+)? {matchvia} "
+ "proto (static|196) metric 20"
+ )
+ assert re.search(route, kernel), f"Failed to find \n'{route}'\n in \n'{kernel}'"
+
+
+def do_config(
+ r1,
+ count,
+ add=True,
+ do_ipv6=False,
+ via=None,
+ vrf=None,
+ use_cli=False,
+):
+ optype = "adding" if add else "removing"
+ iptype = "IPv6" if do_ipv6 else "IPv4"
+
+ #
+ # Set the route details
+ #
+
+ if vrf:
+ super_prefix = "2002::/48" if do_ipv6 else "20.0.0.0/8"
+ else:
+ super_prefix = "2001::/48" if do_ipv6 else "10.0.0.0/8"
+
+ matchtype = ""
+ matchvia = ""
+ if via == "blackhole":
+ pass
+ elif via:
+ matchvia = f"dev {via}"
+ else:
+ if vrf:
+ via = "2102::2" if do_ipv6 else "102.0.0.2"
+ matchvia = f"via {via} dev r1-eth1"
+ else:
+ via = "2101::2" if do_ipv6 else "101.0.0.2"
+ matchvia = f"via {via} dev r1-eth0"
+
+ vrfdbg = " in vrf {}".format(vrf) if vrf else ""
+ logger.debug("{} {} static {} routes{}".format(optype, count, iptype, vrfdbg))
+
+ #
+ # Generate config file in a retrievable place
+ #
+
+ config_file = os.path.join(
+ r1.logdir, r1.name, "{}-routes-{}.conf".format(iptype.lower(), optype)
+ )
+ with open(config_file, "w") as f:
+ if use_cli:
+ f.write("configure terminal\n")
+ if vrf:
+ f.write("vrf {}\n".format(vrf))
+
+ for i, net in enumerate(get_ip_networks(super_prefix, count)):
+ if add:
+ f.write("ip route {} {}\n".format(net, via))
+ else:
+ f.write("no ip route {} {}\n".format(net, via))
+
+ #
+ # Load config file.
+ #
+
+ if use_cli:
+ load_command = 'vtysh < "{}"'.format(config_file)
+ else:
+ load_command = 'vtysh -f "{}"'.format(config_file)
+ tstamp = datetime.datetime.now()
+ output = r1.cmd_raises(load_command)
+ delta = (datetime.datetime.now() - tstamp).total_seconds()
+
+ #
+ # Verify the results are in the kernel
+ #
+ check_kernel(r1, super_prefix, count, add, via == "blackhole", vrf, matchvia)
+
+ optyped = "added" if add else "removed"
+ logger.debug(
+ "{} {} {} static routes under {}{} in {}s".format(
+ optyped, count, iptype.lower(), super_prefix, vrfdbg, delta
+ )
+ )
+
+
+def guts(tgen, vrf, use_cli):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.routers()["r1"]
+
+ step("add via gateway", reset=True)
+ do_config(r1, 1, True, False, vrf=vrf, use_cli=use_cli)
+ step("remove via gateway")
+ do_config(r1, 1, False, False, vrf=vrf, use_cli=use_cli)
+
+ via = f"lo-{vrf}" if vrf else "lo"
+ step("add via loopback")
+ do_config(r1, 1, True, False, via=via, vrf=vrf, use_cli=use_cli)
+ step("remove via loopback")
+ do_config(r1, 1, False, False, via=via, vrf=vrf, use_cli=use_cli)
+
+ step("add via blackhole")
+ do_config(r1, 1, True, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+ step("remove via blackhole")
+ do_config(r1, 1, False, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+
+
+def test_static_cli(tgen):
+ guts(tgen, "", True)
+
+
+def test_static_file(tgen):
+ guts(tgen, "", False)
+
+
+def test_static_vrf_cli(tgen):
+ guts(tgen, "red", True)
+
+
+def test_static_vrf_file(tgen):
+ guts(tgen, "red", False)
#
vtysh_enable=yes
zebra_options=" -A 127.0.0.1 -s 90000000"
+mgmtd_options=" -A 127.0.0.1"
bgpd_options=" -A 127.0.0.1"
ospfd_options=" -A 127.0.0.1"
ospf6d_options=" -A ::1"
{
#define thread_prefix "_"
static const char *const names[] = {
- thread_prefix "thread_add_read_write",
- thread_prefix "thread_add_timer",
- thread_prefix "thread_add_timer_msec",
- thread_prefix "thread_add_timer_tv",
- thread_prefix "thread_add_event",
- thread_prefix "thread_execute",
+ thread_prefix "event_add_read_write",
+ thread_prefix "event_add_timer",
+ thread_prefix "event_add_timer_msec",
+ thread_prefix "event_add_timer_tv",
+ thread_prefix "event_add_event",
+ thread_prefix "event_execute",
};
size_t i;
D_PATH="@CFG_SBIN@" # /usr/lib/frr
C_PATH="@CFG_SYSCONF@" # /etc/frr
V_PATH="@CFG_STATE@" # /var/run/frr
+B_PATH="@CFG_BIN@"
VTYSH="@vtysh_bin@" # /usr/bin/vtysh
FRR_USER="@enable_user@" # frr
FRR_GROUP="@enable_group@" # frr
# Local Daemon selection may be done by using /etc/frr/daemons.
# See /usr/share/doc/frr/README.Debian.gz for further information.
# Keep zebra first and do not list watchfrr!
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="mgmtd zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
MAX_INSTANCES=5
RELOAD_SCRIPT="$D_PATH/frr-reload.py"
NEW_CONFIG_FILE="${2:-$C_PATH/frr.conf}"
[ ! -r $NEW_CONFIG_FILE ] && echo "Unable to read new configuration file $NEW_CONFIG_FILE" && exit 1
echo "Applying only incremental changes to running configuration from frr.conf"
- "$RELOAD_SCRIPT" --reload --bindir "$D_PATH" --confdir "$C_PATH" --rundir "$V_PATH" "$C_PATH/frr.conf"
+ "$RELOAD_SCRIPT" --reload --bindir "$B_PATH" --confdir "$C_PATH" --rundir "$V_PATH" "$C_PATH/frr.conf"
exit $?
;;
D_PATH="@CFG_SBIN@" # /usr/lib/frr
C_PATH="@CFG_SYSCONF@${suffix}" # /etc/frr
V_PATH="@CFG_STATE@${suffix}" # /var/run/frr
+B_PATH="@CFG_BIN@"
VTYSH="@vtysh_bin@" # /usr/bin/vtysh
FRR_USER="@enable_user@" # frr
FRR_GROUP="@enable_group@" # frr
# - keep zebra first
# - watchfrr does NOT belong in this list
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="zebra mgmtd bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
RELOAD_SCRIPT="$D_PATH/frr-reload.py"
#
for daemon in $DAEMONS; do
eval cfg=\$$daemon
eval inst=\$${daemon}_instances
- [ "$daemon" = zebra -o "$daemon" = staticd ] && cfg=yes
+ [ "$daemon" = zebra -o "$daemon" = staticd -o "$daemon" = mgmtd ] && cfg=yes
if [ -n "$cfg" -a "$cfg" != "no" -a "$cfg" != "0" ]; then
if ! daemon_prep "$daemon" "$inst"; then
continue
NEW_CONFIG_FILE="${2:-$C_PATH/frr.conf}"
[ ! -r $NEW_CONFIG_FILE ] && log_failure_msg "Unable to read new configuration file $NEW_CONFIG_FILE" && exit 1
- "$RELOAD_SCRIPT" --reload --bindir "$D_PATH" --confdir "$C_PATH" --rundir "$V_PATH" "$NEW_CONFIG_FILE" `echo $nsopt`
+ "$RELOAD_SCRIPT" --reload --bindir "$B_PATH" --confdir "$C_PATH" --rundir "$V_PATH" "$NEW_CONFIG_FILE" `echo $nsopt`
exit $?
;;
/* Forward decls */
static void vrrp_change_state(struct vrrp_router *r, int to);
-static void vrrp_adver_timer_expire(struct thread *thread);
-static void vrrp_master_down_timer_expire(struct thread *thread);
+static void vrrp_adver_timer_expire(struct event *thread);
+static void vrrp_master_down_timer_expire(struct event *thread);
/*
* Finds the first connected address of the appropriate family on a VRRP
if (pkt->hdr.priority == 0) {
vrrp_send_advertisement(r);
- THREAD_OFF(r->t_adver_timer);
- thread_add_timer_msec(
- master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ EVENT_OFF(r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval *
+ CS2MS,
+ &r->t_adver_timer);
} else if (pkt->hdr.priority > r->priority
|| ((pkt->hdr.priority == r->priority)
&& addrcmp > 0)) {
"Received advertisement from %s w/ priority %hhu; switching to Backup",
r->vr->vrid, family2str(r->family), sipstr,
pkt->hdr.priority);
- THREAD_OFF(r->t_adver_timer);
+ EVENT_OFF(r->t_adver_timer);
if (r->vr->version == 3) {
r->master_adver_interval =
htons(pkt->hdr.v3.adver_int);
}
vrrp_recalculate_timers(r);
- THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(master,
- vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ EVENT_OFF(r->t_master_down_timer);
+ event_add_timer_msec(master,
+ vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
vrrp_change_state(r, VRRP_STATE_BACKUP);
} else {
/* Discard advertisement */
break;
case VRRP_STATE_BACKUP:
if (pkt->hdr.priority == 0) {
- THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(
+ EVENT_OFF(r->t_master_down_timer);
+ event_add_timer_msec(
master, vrrp_master_down_timer_expire, r,
r->skew_time * CS2MS, &r->t_master_down_timer);
} else if (!r->vr->preempt_mode
ntohs(pkt->hdr.v3.adver_int);
}
vrrp_recalculate_timers(r);
- THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(master,
- vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ EVENT_OFF(r->t_master_down_timer);
+ event_add_timer_msec(master,
+ vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
} else if (r->vr->preempt_mode
&& pkt->hdr.priority < r->priority) {
/* Discard advertisement */
/*
* Read and process next IPvX datagram.
*/
-static void vrrp_read(struct thread *thread)
+static void vrrp_read(struct event *thread)
{
- struct vrrp_router *r = THREAD_ARG(thread);
+ struct vrrp_router *r = EVENT_ARG(thread);
struct vrrp_pkt *pkt;
ssize_t pktsize;
memset(r->ibuf, 0x00, sizeof(r->ibuf));
if (resched)
- thread_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
+ event_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
}
/*
vrrp_zebra_radv_set(r, false);
/* Disable Adver_Timer */
- THREAD_OFF(r->t_adver_timer);
+ EVENT_OFF(r->t_adver_timer);
r->advert_pending = false;
r->garp_pending = false;
/*
* Called when Adver_Timer expires.
*/
-static void vrrp_adver_timer_expire(struct thread *thread)
+static void vrrp_adver_timer_expire(struct event *thread)
{
- struct vrrp_router *r = THREAD_ARG(thread);
+ struct vrrp_router *r = EVENT_ARG(thread);
DEBUGD(&vrrp_dbg_proto,
VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
vrrp_send_advertisement(r);
/* Reset the Adver_Timer to Advertisement_Interval */
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
} else {
zlog_err(VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
"Adver_Timer expired in state '%s'; this is a bug",
/*
* Called when Master_Down_Timer expires.
*/
-static void vrrp_master_down_timer_expire(struct thread *thread)
+static void vrrp_master_down_timer_expire(struct event *thread)
{
- struct vrrp_router *r = THREAD_ARG(thread);
+ struct vrrp_router *r = EVENT_ARG(thread);
zlog_info(VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
"Master_Down_Timer expired",
r->vr->vrid, family2str(r->family));
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
vrrp_change_state(r, VRRP_STATE_MASTER);
}
}
/* Schedule listener */
- thread_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
+ event_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
/* Configure effective priority */
assert(listhead(r->addrs));
}
if (r->priority == VRRP_PRIO_MASTER) {
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
vrrp_change_state(r, VRRP_STATE_MASTER);
} else {
r->master_adver_interval = r->vr->advertisement_interval;
vrrp_recalculate_timers(r);
- thread_add_timer_msec(master, vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ event_add_timer_msec(master, vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
vrrp_change_state(r, VRRP_STATE_BACKUP);
}
}
/* Cancel all timers */
- THREAD_OFF(r->t_adver_timer);
- THREAD_OFF(r->t_master_down_timer);
- THREAD_OFF(r->t_read);
- THREAD_OFF(r->t_write);
+ EVENT_OFF(r->t_adver_timer);
+ EVENT_OFF(r->t_master_down_timer);
+ EVENT_OFF(r->t_read);
+ EVENT_OFF(r->t_write);
/* Protodown macvlan */
if (r->mvl_ifp)
list_delete(&vrs);
- hash_clean(vrrp_vrouters_hash, NULL);
- hash_free(vrrp_vrouters_hash);
+ hash_clean_and_free(&vrrp_vrouters_hash, NULL);
}
#include "lib/northbound.h"
#include "lib/privs.h"
#include "lib/stream.h"
-#include "lib/thread.h"
+#include "lib/frrevent.h"
#include "lib/vty.h"
/* Global definitions */
extern struct vrrp_defaults vd;
/* threadmaster */
-extern struct thread_master *master;
+extern struct event_loop *master;
/* privileges */
extern struct zebra_privs_t vrrp_privs;
uint32_t trans_cnt;
} stats;
- struct thread *t_master_down_timer;
- struct thread *t_adver_timer;
- struct thread *t_read;
- struct thread *t_write;
+ struct event *t_master_down_timer;
+ struct event *t_adver_timer;
+ struct event *t_read;
+ struct event *t_write;
};
/*
*/
#include <zebra.h>
+#include <getopt.h>
+
#include <lib/version.h>
#include "lib/command.h"
#include "lib/filter.h"
-#include "lib/getopt.h"
#include "lib/if.h"
#include "lib/libfrr.h"
#include "lib/log.h"
#include "lib/nexthop.h"
#include "lib/privs.h"
#include "lib/sigevent.h"
-#include "lib/thread.h"
+#include "lib/frrevent.h"
#include "lib/vrf.h"
#include "lib/vty.h"
struct option longopts[] = { {0} };
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
static struct frr_daemon_info vrrpd_di;
/* VTY should add timestamp */
bool vtysh_add_timestamp;
-/* VTY shell client structure */
-struct vtysh_client {
- int fd;
- const char *name;
- int flag;
- char path[MAXPATHLEN];
- struct vtysh_client *next;
-
- struct thread *log_reader;
- int log_fd;
- uint32_t lost_msgs;
-};
-
static bool stderr_tty;
static bool stderr_stdout_same;
/* --- */
+/*
+ * When updating this array, remember to change the array size here and in
+ * vtysh.h
+ */
struct vtysh_client vtysh_client[] = {
+ {.name = "mgmtd", .flag = VTYSH_MGMTD},
{.name = "zebra", .flag = VTYSH_ZEBRA},
{.name = "ripd", .flag = VTYSH_RIPD},
{.name = "ripngd", .flag = VTYSH_RIPNGD},
int ret;
const char *fname = argv[1]->arg;
- ret = vtysh_read_config(fname, true);
+ ret = vtysh_apply_config(fname, true, false);
/* Return to enable mode - the 'read_config' api leaves us up a level */
vtysh_execute_no_pager("enable");
text + textpos);
}
-static void vtysh_log_read(struct thread *thread)
+static void vtysh_log_read(struct event *thread)
{
- struct vtysh_client *vclient = THREAD_ARG(thread);
+ struct vtysh_client *vclient = EVENT_ARG(thread);
struct {
struct zlog_live_hdr hdr;
char text[4096];
const char *text;
ssize_t ret;
- thread_add_read(master, vtysh_log_read, vclient, vclient->log_fd,
- &vclient->log_reader);
+ event_add_read(master, vtysh_log_read, vclient, vclient->log_fd,
+ &vclient->log_reader);
ret = recv(vclient->log_fd, &buf, sizeof(buf), 0);
"log monitor connection closed unexpectedly");
buf.hdr.textlen = strlen(buf.text);
- THREAD_OFF(vclient->log_reader);
+ EVENT_OFF(vclient->log_reader);
close(vclient->log_fd);
vclient->log_fd = -1;
if (fd != -1) {
set_nonblocking(fd);
vclient->log_fd = fd;
- thread_add_read(master, vtysh_log_read, vclient,
- vclient->log_fd,
- &vclient->log_reader);
+ event_add_read(master, vtysh_log_read, vclient,
+ vclient->log_fd,
+ &vclient->log_reader);
}
if (ret != CMD_SUCCESS) {
vty_out(vty, "%% failed to enable logs on %s\n",
* a close notification...
*/
if (vclient->log_fd != -1) {
- THREAD_OFF(vclient->log_reader);
+ EVENT_OFF(vclient->log_reader);
close(vclient->log_fd);
vclient->log_fd = -1;
#include "memory.h"
DECLARE_MGROUP(MVTYSH);
-struct thread_master;
+struct event_loop;
-extern struct thread_master *master;
+extern struct event_loop *master;
#define VTYSH_ZEBRA 0x00001
#define VTYSH_RIPD 0x00002
#define VTYSH_VRRPD 0x40000
#define VTYSH_PATHD 0x80000
#define VTYSH_PIM6D 0x100000
+#define VTYSH_MGMTD 0x200000
#define VTYSH_WAS_ACTIVE (-2)
/* watchfrr is not in ALL since library CLI functions should not be
* run on it (logging & co. should stay in a fixed/frozen config, and
* things like prefix lists are not even initialised) */
-#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD
+#define VTYSH_ALL \
+ VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \
+ VTYSH_LDPD | VTYSH_BGPD | VTYSH_ISISD | VTYSH_PIMD | \
+ VTYSH_PIM6D | VTYSH_NHRPD | VTYSH_EIGRPD | VTYSH_BABELD | \
+ VTYSH_SHARPD | VTYSH_PBRD | VTYSH_STATICD | VTYSH_BFDD | \
+ VTYSH_FABRICD | VTYSH_VRRPD | VTYSH_PATHD | VTYSH_MGMTD
#define VTYSH_ACL VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA
#define VTYSH_AFFMAP VTYSH_ZEBRA | VTYSH_ISISD
#define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_FABRICD
VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | VTYSH_FABRICD | \
VTYSH_VRRPD
#define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD
-#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD
+#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD | VTYSH_MGMTD
#define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD
int vtysh_mark_file(const char *filename);
-int vtysh_read_config(const char *filename, bool dry_run);
+int vtysh_apply_config(const char *config_file_path, bool dry_run, bool fork);
int vtysh_write_config_integrated(void);
void vtysh_config_parse_line(void *, const char *);
extern bool vtysh_add_timestamp;
+struct vtysh_client {
+ int fd;
+ const char *name;
+ int flag;
+ char path[MAXPATHLEN];
+ struct vtysh_client *next;
+
+ struct event *log_reader;
+ int log_fd;
+ uint32_t lost_msgs;
+};
+
+extern struct vtysh_client vtysh_client[22];
+
#endif /* VTYSH_H */
*/
#include <zebra.h>
+#include <sys/wait.h>
#include "command.h"
#include "linklist.h"
return (ret);
}
-/* Read up configuration file from config_default_dir. */
-int vtysh_read_config(const char *config_default_dir, bool dry_run)
+/*
+ * Read configuration file and send it to all connected daemons
+ */
+static int vtysh_read_config(const char *config_file_path, bool dry_run)
{
FILE *confp = NULL;
bool save;
int ret;
- confp = fopen(config_default_dir, "r");
+ confp = fopen(config_file_path, "r");
if (confp == NULL) {
fprintf(stderr,
"%% Can't open configuration file %s due to '%s'.\n",
- config_default_dir, safe_strerror(errno));
+ config_file_path, safe_strerror(errno));
return CMD_ERR_NO_FILE;
}
vtysh_add_timestamp = save;
- return (ret);
+ return ret;
+}
+
+/*
+ * Apply a configuration file to all connected daemons.
+ *
+ * config_file_path: configuration file to read and replay.
+ * dry_run:          parse only; do not actually apply.
+ * do_fork:          fork one child per daemon so the (potentially large)
+ *                   configuration is applied to all daemons in parallel
+ *                   rather than serially from a single process.
+ *
+ * Returns 0 on success.  In forking mode the parent returns the first
+ * non-zero status collected from any child; in serial mode the result of
+ * vtysh_read_config() is returned.
+ */
+int vtysh_apply_config(const char *config_file_path, bool dry_run, bool do_fork)
+{
+	/*
+	 * We need to apply the whole config file to all daemons. Instead of
+	 * having one client talk to N daemons, we fork N times and let each
+	 * child handle one daemon.
+	 */
+	pid_t fork_pid = getpid();
+	int status = 0;
+	int ret;
+	/* Initialized so the non-fork path never reads indeterminate values
+	 * (and to silence -Wmaybe-uninitialized). */
+	int my_client_type = 0;
+	char my_client[64] = "";
+
+	if (do_fork) {
+		for (unsigned int i = 0; i < array_size(vtysh_client); i++) {
+			/* Store name of client this fork will handle */
+			strlcpy(my_client, vtysh_client[i].name,
+				sizeof(my_client));
+			my_client_type = vtysh_client[i].flag;
+			fork_pid = fork();
+
+			/*
+			 * fork() failure: this daemon would otherwise be
+			 * silently skipped.  Report it and move on so the
+			 * remaining daemons still get their config.
+			 */
+			if (fork_pid == -1) {
+				fprintf(stderr,
+					"Failed to fork child for %s: %s\n",
+					my_client, safe_strerror(errno));
+				continue;
+			}
+
+			/* If child, break */
+			if (fork_pid == 0)
+				break;
+		}
+
+		/* parent, wait for children */
+		if (fork_pid != 0) {
+			int keep_status = 0;
+
+			fprintf(stdout,
+				"Waiting for children to finish applying config...\n");
+			while (wait(&status) > 0) {
+				if (keep_status)
+					continue;
+				/*
+				 * WEXITSTATUS() is only meaningful when the
+				 * child exited normally; a child killed by a
+				 * signal did not finish applying the config,
+				 * so count that as a failure too.
+				 */
+				if (WIFEXITED(status))
+					keep_status = WEXITSTATUS(status);
+				else
+					keep_status = 1;
+			}
+
+			/*
+			 * This returns the first failing status collected
+			 * (if any).  Good enough for the moment.
+			 */
+			return keep_status;
+		}
+
+		/*
+		 * children, grow up to be cowboys
+		 */
+		for (unsigned int i = 0; i < array_size(vtysh_client); i++) {
+			if (my_client_type != vtysh_client[i].flag) {
+				struct vtysh_client *cl;
+
+				/*
+				 * If this is a client we aren't responsible
+				 * for, disconnect
+				 */
+				for (cl = &vtysh_client[i]; cl; cl = cl->next) {
+					if (cl->fd >= 0)
+						close(cl->fd);
+					cl->fd = -1;
+				}
+			} else if (vtysh_client[i].fd == -1 &&
+				   vtysh_client[i].next == NULL) {
+				/*
+				 * If this is the client we are responsible
+				 * for, but we aren't already connected to that
+				 * client, that means the client isn't up in
+				 * the first place and we can exit early
+				 */
+				exit(0);
+			}
+		}
+
+		fprintf(stdout, "[%d|%s] sending configuration\n", getpid(),
+			my_client);
+	}
+
+	ret = vtysh_read_config(config_file_path, dry_run);
+
+	if (ret) {
+		/*
+		 * Report the file we actually processed, not the global
+		 * default (frr_config) -- they differ when an explicit
+		 * input file was given.
+		 */
+		if (do_fork)
+			fprintf(stderr,
+				"[%d|%s] Configuration file[%s] processing failure: %d\n",
+				getpid(), my_client, config_file_path, ret);
+		else
+			fprintf(stderr,
+				"Configuration file[%s] processing failure: %d\n",
+				config_file_path, ret);
+	} else if (do_fork) {
+		fprintf(stderr, "[%d|%s] done\n", getpid(), my_client);
+		exit(0);
+	}
+
+	return ret;
}
/* We don't write vtysh specific into file from vtysh. vtysh.conf should
#include <sys/un.h>
#include <setjmp.h>
-#include <sys/wait.h>
#include <pwd.h>
#include <sys/file.h>
#include <unistd.h>
int user_mode;
/* Master of threads. */
-struct thread_master *master;
+struct event_loop *master;
/* Command logging */
FILE *logfile;
"-u --user Run as an unprivileged user\n"
"-w, --writeconfig Write integrated config (frr.conf) and exit\n"
"-H, --histfile Override history file\n"
+ "-t, --timestamp Print a timestamp before going to shell or reading the configuration\n"
+ " --no-fork Don't fork clients to handle daemons (slower for large configs)\n"
"-h, --help Display this help and exit\n\n"
"Note that multiple commands may be executed from the command\n"
"line by passing multiple -c args, or by embedding linefeed\n"
/* VTY shell options, we use GNU getopt library. */
#define OPTION_VTYSOCK 1000
#define OPTION_CONFDIR 1001
+#define OPTION_NOFORK 1002
struct option longopts[] = {
{"boot", no_argument, NULL, 'b'},
/* For compatibility with older zebra/quagga versions */
{"pathspace", required_argument, NULL, 'N'},
{"user", no_argument, NULL, 'u'},
{"timestamp", no_argument, NULL, 't'},
+ {"no-fork", no_argument, NULL, OPTION_NOFORK},
{0}};
bool vtysh_loop_exited;
-static struct thread *vtysh_rl_read_thread;
+static struct event *vtysh_rl_read_thread;
-static void vtysh_rl_read(struct thread *thread)
+static void vtysh_rl_read(struct event *thread)
{
- thread_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
- &vtysh_rl_read_thread);
+ event_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
+ &vtysh_rl_read_thread);
rl_callback_read_char();
}
/* Read a string, and return a pointer to it. Returns NULL on EOF. */
static void vtysh_rl_run(void)
{
- struct thread thread;
+ struct event thread;
- master = thread_master_create(NULL);
+ master = event_master_create(NULL);
rl_callback_handler_install(vtysh_prompt(), vtysh_rl_callback);
- thread_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
- &vtysh_rl_read_thread);
+ event_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
+ &vtysh_rl_read_thread);
- while (!vtysh_loop_exited && thread_fetch(master, &thread))
- thread_call(&thread);
+ while (!vtysh_loop_exited && event_fetch(master, &thread))
+ event_call(&thread);
if (!vtysh_loop_exited)
rl_callback_handler_remove();
- thread_master_free(master);
+ event_master_free(master);
}
static void log_it(const char *line)
int dryrun = 0;
int boot_flag = 0;
bool ts_flag = false;
+ bool no_fork = false;
const char *daemon_name = NULL;
const char *inputfile = NULL;
struct cmd_rec {
ditch_suid = 1; /* option disables SUID */
snprintf(sysconfdir, sizeof(sysconfdir), "%s/", optarg);
break;
+ case OPTION_NOFORK:
+ no_fork = true;
+ break;
case 'N':
if (strchr(optarg, '/') || strchr(optarg, '.')) {
fprintf(stderr,
}
}
+ /* No need for forks if we're talking to 1 daemon */
+ if (daemon_name)
+ no_fork = true;
+
if (ditch_suid) {
elevuid = realuid;
elevgid = realgid;
/* Read vtysh configuration file before connecting to daemons.
* (file may not be readable to calling user in SUID mode) */
suid_on();
- vtysh_read_config(vtysh_config, dryrun);
+ vtysh_apply_config(vtysh_config, dryrun, false);
suid_off();
}
/* Error code library system */
/* Start execution only if not in dry-run mode */
if (dryrun && !cmd) {
if (inputfile) {
- ret = vtysh_read_config(inputfile, dryrun);
+ ret = vtysh_apply_config(inputfile, dryrun, false);
} else {
- ret = vtysh_read_config(frr_config, dryrun);
+ ret = vtysh_apply_config(frr_config, dryrun, false);
}
exit(ret);
return vtysh_write_config_integrated();
}
- if (inputfile) {
+ if (boot_flag)
+ inputfile = frr_config;
+
+ if (inputfile || boot_flag) {
vtysh_flock_config(inputfile);
- ret = vtysh_read_config(inputfile, dryrun);
+ ret = vtysh_apply_config(inputfile, dryrun, !no_fork);
vtysh_unflock_config();
+
+ if (no_error)
+ ret = 0;
+
exit(ret);
}
exit(0);
}
- /* Boot startup configuration file. */
- if (boot_flag) {
- vtysh_flock_config(frr_config);
- ret = vtysh_read_config(frr_config, dryrun);
- vtysh_unflock_config();
- if (ret) {
- fprintf(stderr,
- "Configuration file[%s] processing failure: %d\n",
- frr_config, ret);
- if (no_error)
- exit(0);
- else
- exit(ret);
- } else
- exit(0);
- }
-
vtysh_readline_init();
vty_hello(vty);
*/
#include <zebra.h>
-#include <thread.h>
+#include "frrevent.h"
#include <log.h>
#include <network.h>
#include <sigevent.h>
DEFINE_MTYPE_STATIC(WATCHFRR, WATCHFRR_DAEMON, "watchfrr daemon entry");
/* Needs to be global, referenced somewhere inside libfrr. */
-struct thread_master *master;
+struct event_loop *master;
static bool watch_only = false;
const char *pathspace;
pid_t pid;
struct timeval time;
long interval;
- struct thread *t_kill;
+ struct event *t_kill;
int kills;
};
static struct global_state {
enum restart_phase phase;
- struct thread *t_phase_hanging;
- struct thread *t_startup_timeout;
- struct thread *t_operational;
+ struct event *t_phase_hanging;
+ struct event *t_startup_timeout;
+ struct event *t_operational;
const char *vtydir;
long period;
long timeout;
int fd;
struct timeval echo_sent;
unsigned int connect_tries;
- struct thread *t_wakeup;
- struct thread *t_read;
- struct thread *t_write;
+ struct event *t_wakeup;
+ struct event *t_read;
+ struct event *t_write;
struct daemon *next;
struct restart_info restart;
{NULL, 0, NULL, 0}};
static int try_connect(struct daemon *dmn);
-static void wakeup_send_echo(struct thread *t_wakeup);
+static void wakeup_send_echo(struct event *t_wakeup);
static void try_restart(struct daemon *dmn);
static void phase_check(void);
static void restart_done(struct daemon *dmn);
return result;
}
-static void restart_kill(struct thread *t_kill)
+static void restart_kill(struct event *t_kill)
{
- struct restart_info *restart = THREAD_ARG(t_kill);
+ struct restart_info *restart = EVENT_ARG(t_kill);
struct timeval delay;
time_elapsed(&delay, &restart->time);
zlog_err(
"%s %s child process appears to still be reading configuration, delaying for another %lu time",
restart->what, restart->name, gs.restart_timeout);
- thread_add_timer(master, restart_kill, restart,
- gs.restart_timeout, &restart->t_kill);
+ event_add_timer(master, restart_kill, restart,
+ gs.restart_timeout, &restart->t_kill);
return;
}
(long)delay.tv_sec, (restart->kills ? SIGKILL : SIGTERM));
kill(-restart->pid, (restart->kills ? SIGKILL : SIGTERM));
restart->kills++;
- thread_add_timer(master, restart_kill, restart, gs.restart_timeout,
- &restart->t_kill);
+ event_add_timer(master, restart_kill, restart, gs.restart_timeout,
+ &restart->t_kill);
}
static struct restart_info *find_child(pid_t child)
what = restart->what;
restart->pid = 0;
gs.numpids--;
- thread_cancel(&restart->t_kill);
+ event_cancel(&restart->t_kill);
/* Update restart time to reflect the time the command
* completed. */
snprintf(cmd, sizeof(cmd), command, restart->name);
#pragma GCC diagnostic pop
if ((restart->pid = run_background(cmd)) > 0) {
- thread_add_timer(master, restart_kill, restart,
- gs.restart_timeout, &restart->t_kill);
+ event_add_timer(master, restart_kill, restart,
+ gs.restart_timeout, &restart->t_kill);
restart->what = cmdtype;
gs.numpids++;
} else
#define SET_READ_HANDLER(DMN) \
do { \
(DMN)->t_read = NULL; \
- thread_add_read(master, handle_read, (DMN), (DMN)->fd, \
- &(DMN)->t_read); \
+ event_add_read(master, handle_read, (DMN), (DMN)->fd, \
+ &(DMN)->t_read); \
} while (0);
#define SET_WAKEUP_DOWN(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_down, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_down, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
#define SET_WAKEUP_UNRESPONSIVE(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_unresponsive, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_unresponsive, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
#define SET_WAKEUP_ECHO(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_send_echo, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_send_echo, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
-static void wakeup_down(struct thread *t_wakeup)
+static void wakeup_down(struct event *t_wakeup)
{
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
dmn->t_wakeup = NULL;
if (try_connect(dmn) < 0)
try_restart(dmn);
}
-static void wakeup_init(struct thread *t_wakeup)
+static void wakeup_init(struct event *t_wakeup)
{
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
dmn->t_wakeup = NULL;
if (try_connect(dmn) < 0) {
dmn->name, state_str[dmn->state]);
return;
}
- THREAD_OFF(dmn->t_wakeup);
+ EVENT_OFF(dmn->t_wakeup);
if (try_connect(dmn) < 0)
SET_WAKEUP_DOWN(dmn);
}
-static void daemon_restarting_operational(struct thread *thread)
+static void daemon_restarting_operational(struct event *thread)
{
systemd_send_status("FRR Operational");
}
close(dmn->fd);
dmn->fd = -1;
}
- THREAD_OFF(dmn->t_read);
- THREAD_OFF(dmn->t_write);
- THREAD_OFF(dmn->t_wakeup);
+ EVENT_OFF(dmn->t_read);
+ EVENT_OFF(dmn->t_write);
+ EVENT_OFF(dmn->t_wakeup);
if (try_connect(dmn) < 0)
SET_WAKEUP_DOWN(dmn);
phase_check();
}
-static void handle_read(struct thread *t_read)
+static void handle_read(struct event *t_read)
{
- struct daemon *dmn = THREAD_ARG(t_read);
+ struct daemon *dmn = EVENT_ARG(t_read);
static const char resp[sizeof(PING_TOKEN) + 4] = PING_TOKEN "\n";
char buf[sizeof(resp) + 100];
ssize_t rc;
dmn->name, (long)delay.tv_sec, (long)delay.tv_usec);
SET_READ_HANDLER(dmn);
- thread_cancel(&dmn->t_wakeup);
+ event_cancel(&dmn->t_wakeup);
SET_WAKEUP_ECHO(dmn);
}
if (gs.numdown == 0) {
daemon_send_ready(0);
- THREAD_OFF(gs.t_operational);
+ EVENT_OFF(gs.t_operational);
- thread_add_timer(master, daemon_restarting_operational, NULL,
- gs.operational_timeout, &gs.t_operational);
+ event_add_timer(master, daemon_restarting_operational, NULL,
+ gs.operational_timeout, &gs.t_operational);
}
SET_WAKEUP_ECHO(dmn);
phase_check();
}
-static void check_connect(struct thread *t_write)
+static void check_connect(struct event *t_write)
{
- struct daemon *dmn = THREAD_ARG(t_write);
+ struct daemon *dmn = EVENT_ARG(t_write);
int sockerr;
socklen_t reslen = sizeof(sockerr);
daemon_up(dmn, "delayed connect succeeded");
}
-static void wakeup_connect_hanging(struct thread *t_wakeup)
+static void wakeup_connect_hanging(struct event *t_wakeup)
{
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
char why[100];
dmn->t_wakeup = NULL;
zlog_debug("%s: connection in progress", dmn->name);
dmn->state = DAEMON_CONNECTING;
dmn->fd = sock;
- thread_add_write(master, check_connect, dmn, dmn->fd,
- &dmn->t_write);
- thread_add_timer(master, wakeup_connect_hanging, dmn,
- gs.timeout, &dmn->t_wakeup);
+ event_add_write(master, check_connect, dmn, dmn->fd,
+ &dmn->t_write);
+ event_add_timer(master, wakeup_connect_hanging, dmn, gs.timeout,
+ &dmn->t_wakeup);
SET_READ_HANDLER(dmn);
return 0;
}
return 1;
}
-static void phase_hanging(struct thread *t_hanging)
+static void phase_hanging(struct event *t_hanging)
{
gs.t_phase_hanging = NULL;
flog_err(EC_WATCHFRR_CONNECTION,
static void set_phase(enum restart_phase new_phase)
{
gs.phase = new_phase;
- thread_cancel(&gs.t_phase_hanging);
+ event_cancel(&gs.t_phase_hanging);
- thread_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
- &gs.t_phase_hanging);
+ event_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
+ &gs.t_phase_hanging);
}
static void phase_check(void)
gs.start_command, 1, 0);
}
gs.phase = PHASE_NONE;
- THREAD_OFF(gs.t_phase_hanging);
+ EVENT_OFF(gs.t_phase_hanging);
zlog_notice("Phased global restart has completed.");
break;
}
run_job(&gs.restart, "restart", gs.restart_command, 0, 1);
}
-static void wakeup_unresponsive(struct thread *t_wakeup)
+static void wakeup_unresponsive(struct event *t_wakeup)
{
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
dmn->t_wakeup = NULL;
if (dmn->state != DAEMON_UNRESPONSIVE)
}
}
-static void wakeup_no_answer(struct thread *t_wakeup)
+static void wakeup_no_answer(struct event *t_wakeup)
{
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
dmn->t_wakeup = NULL;
dmn->state = DAEMON_UNRESPONSIVE;
try_restart(dmn);
}
-static void wakeup_send_echo(struct thread *t_wakeup)
+static void wakeup_send_echo(struct event *t_wakeup)
{
static const char echocmd[] = "echo " PING_TOKEN;
ssize_t rc;
- struct daemon *dmn = THREAD_ARG(t_wakeup);
+ struct daemon *dmn = EVENT_ARG(t_wakeup);
dmn->t_wakeup = NULL;
if (((rc = write(dmn->fd, echocmd, sizeof(echocmd))) < 0)
daemon_down(dmn, why);
} else {
gettimeofday(&dmn->echo_sent, NULL);
- thread_add_timer(master, wakeup_no_answer, dmn, gs.timeout,
- &dmn->t_wakeup);
+ event_add_timer(master, wakeup_no_answer, dmn, gs.timeout,
+ &dmn->t_wakeup);
}
}
return res;
}
-static void startup_timeout(struct thread *t_wakeup)
+static void startup_timeout(struct event *t_wakeup)
{
daemon_send_ready(1);
}
struct daemon *dmn, **add = &gs.daemons;
char alldaemons[512] = "", *p = alldaemons;
- thread_add_timer_msec(master, startup_timeout, NULL, STARTUP_TIMEOUT,
- &gs.t_startup_timeout);
+ event_add_timer_msec(master, startup_timeout, NULL, STARTUP_TIMEOUT,
+ &gs.t_startup_timeout);
for (i = optind; i < argc; i++) {
dmn = XCALLOC(MTYPE_WATCHFRR_DAEMON, sizeof(*dmn));
gs.numdaemons++;
gs.numdown++;
dmn->fd = -1;
- thread_add_timer_msec(master, wakeup_init, dmn, 0,
- &dmn->t_wakeup);
+ event_add_timer_msec(master, wakeup_init, dmn, 0,
+ &dmn->t_wakeup);
dmn->restart.interval = gs.min_restart_interval;
*add = dmn;
add = &dmn->next;
when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:distance')";
leaf distance {
type uint8 {
- range "0..255";
+ range "1..255";
}
}
}
}
-static int zd_dpdk_plugin_init(struct thread_master *tm)
+static int zd_dpdk_plugin_init(struct event_loop *tm)
{
int ret;
/* data plane events. */
struct zebra_dplane_provider *prov;
struct frr_pthread *fthread;
- struct thread *t_connect;
- struct thread *t_read;
- struct thread *t_write;
- struct thread *t_event;
- struct thread *t_nhg;
- struct thread *t_dequeue;
+ struct event *t_connect;
+ struct event *t_read;
+ struct event *t_write;
+ struct event *t_event;
+ struct event *t_nhg;
+ struct event *t_dequeue;
/* zebra events. */
- struct thread *t_lspreset;
- struct thread *t_lspwalk;
- struct thread *t_nhgreset;
- struct thread *t_nhgwalk;
- struct thread *t_ribreset;
- struct thread *t_ribwalk;
- struct thread *t_rmacreset;
- struct thread *t_rmacwalk;
+ struct event *t_lspreset;
+ struct event *t_lspwalk;
+ struct event *t_nhgreset;
+ struct event *t_nhgwalk;
+ struct event *t_ribreset;
+ struct event *t_ribwalk;
+ struct event *t_rmacreset;
+ struct event *t_rmacwalk;
/* Statistic counters. */
struct {
};
#define FPM_RECONNECT(fnc) \
- thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
- FNE_INTERNAL_RECONNECT, &(fnc)->t_event)
+ event_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
+ FNE_INTERNAL_RECONNECT, &(fnc)->t_event)
#define WALK_FINISH(fnc, ev) \
- thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
- (ev), NULL)
+ event_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
+ (ev), NULL)
/*
* Prototypes.
*/
-static void fpm_process_event(struct thread *t);
+static void fpm_process_event(struct event *t);
static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx);
-static void fpm_lsp_send(struct thread *t);
-static void fpm_lsp_reset(struct thread *t);
-static void fpm_nhg_send(struct thread *t);
-static void fpm_nhg_reset(struct thread *t);
-static void fpm_rib_send(struct thread *t);
-static void fpm_rib_reset(struct thread *t);
-static void fpm_rmac_send(struct thread *t);
-static void fpm_rmac_reset(struct thread *t);
+static void fpm_lsp_send(struct event *t);
+static void fpm_lsp_reset(struct event *t);
+static void fpm_nhg_send(struct event *t);
+static void fpm_nhg_reset(struct event *t);
+static void fpm_rib_send(struct event *t);
+static void fpm_rib_reset(struct event *t);
+static void fpm_rmac_send(struct event *t);
+static void fpm_rmac_reset(struct event *t);
/*
* CLI.
memcpy(&sin6->sin6_addr, naddr, sizeof(sin6->sin6_addr));
ask_reconnect:
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_RECONNECT, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_RECONNECT, &gfnc->t_event);
return CMD_SUCCESS;
}
"FPM remote listening server port\n"
"Remote FPM server port\n")
{
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_DISABLE, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_DISABLE, &gfnc->t_event);
return CMD_SUCCESS;
}
if (gfnc->use_nhg)
return CMD_SUCCESS;
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_nhg);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
if (!gfnc->use_nhg)
return CMD_SUCCESS;
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_nhg);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
FPM_STR
"FPM statistic counters\n")
{
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_RESET_COUNTERS, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_RESET_COUNTERS, &gfnc->t_event);
return CMD_SUCCESS;
}
/*
* FPM functions.
*/
-static void fpm_connect(struct thread *t);
+static void fpm_connect(struct event *t);
static void fpm_reconnect(struct fpm_nl_ctx *fnc)
{
/* Cancel all zebra threads first. */
- thread_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
/*
* Grab the lock to empty the streams (data plane might try to
stream_reset(fnc->ibuf);
stream_reset(fnc->obuf);
- THREAD_OFF(fnc->t_read);
- THREAD_OFF(fnc->t_write);
+ EVENT_OFF(fnc->t_read);
+ EVENT_OFF(fnc->t_write);
/* FPM is disabled, don't attempt to connect. */
if (fnc->disabled)
return;
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
}
-static void fpm_read(struct thread *t)
+static void fpm_read(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
fpm_msg_hdr_t fpm;
ssize_t rv;
char buf[65535];
}
/* Schedule the next read */
- thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
- &fnc->t_read);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
+ &fnc->t_read);
/* We've got an interruption. */
if (rv == -2)
stream_reset(fnc->ibuf);
}
-static void fpm_write(struct thread *t)
+static void fpm_write(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
socklen_t statuslen;
ssize_t bwritten;
int rv, status;
* Starting with LSPs walk all FPM objects, marking them
* as unsent and then replaying them.
*/
- thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
- &fnc->t_lspreset);
+ event_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
+ &fnc->t_lspreset);
/* Permit receiving messages now. */
- thread_add_read(fnc->fthread->master, fpm_read, fnc,
- fnc->socket, &fnc->t_read);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
+ &fnc->t_read);
}
frr_mutex_lock_autounlock(&fnc->obuf_mutex);
/* Stream is not empty yet, we must schedule more writes. */
if (STREAM_READABLE(fnc->obuf)) {
stream_pulldown(fnc->obuf);
- thread_add_write(fnc->fthread->master, fpm_write, fnc,
- fnc->socket, &fnc->t_write);
+ event_add_write(fnc->fthread->master, fpm_write, fnc,
+ fnc->socket, &fnc->t_write);
return;
}
}
-static void fpm_connect(struct thread *t)
+static void fpm_connect(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
struct sockaddr_in *sin = (struct sockaddr_in *)&fnc->addr;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&fnc->addr;
socklen_t slen;
if (sock == -1) {
zlog_err("%s: fpm socket failed: %s", __func__,
strerror(errno));
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
return;
}
close(sock);
zlog_warn("%s: fpm connection failed: %s", __func__,
strerror(errno));
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
return;
}
fnc->connecting = (errno == EINPROGRESS);
fnc->socket = sock;
if (!fnc->connecting)
- thread_add_read(fnc->fthread->master, fpm_read, fnc, sock,
- &fnc->t_read);
- thread_add_write(fnc->fthread->master, fpm_write, fnc, sock,
- &fnc->t_write);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, sock,
+ &fnc->t_read);
+ event_add_write(fnc->fthread->master, fpm_write, fnc, sock,
+ &fnc->t_write);
/*
* Starting with LSPs walk all FPM objects, marking them
* If we are not connected, then delay the objects reset/send.
*/
if (!fnc->connecting)
- thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
- &fnc->t_lspreset);
+ event_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
+ &fnc->t_lspreset);
}
/**
memory_order_relaxed);
/* Tell the thread to start writing. */
- thread_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
- &fnc->t_write);
+ event_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
+ &fnc->t_write);
return 0;
}
return HASHWALK_CONTINUE;
}
-static void fpm_lsp_send(struct thread *t)
+static void fpm_lsp_send(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
- struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
struct fpm_lsp_arg fla;
fla.fnc = fnc;
WALK_FINISH(fnc, FNE_LSP_FINISHED);
/* Now move onto routes */
- thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
- &fnc->t_nhgreset);
+ event_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
+ &fnc->t_nhgreset);
} else {
/* Didn't finish - reschedule LSP walk */
- thread_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
- &fnc->t_lspwalk);
+ event_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
+ &fnc->t_lspwalk);
}
}
return HASHWALK_CONTINUE;
}
-static void fpm_nhg_send(struct thread *t)
+static void fpm_nhg_send(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
struct fpm_nhg_arg fna;
fna.fnc = fnc;
/* We are done sending next hops, lets install the routes now. */
if (fna.complete) {
WALK_FINISH(fnc, FNE_NHG_FINISHED);
- thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
- &fnc->t_ribreset);
+ event_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
+ &fnc->t_ribreset);
} else /* Otherwise reschedule next hop group again. */
- thread_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
- &fnc->t_nhgwalk);
+ event_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
+ &fnc->t_nhgwalk);
}
/**
* Send all RIB installed routes to the connected data plane.
*/
-static void fpm_rib_send(struct thread *t)
+static void fpm_rib_send(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
rib_dest_t *dest;
struct route_node *rn;
struct route_table *rt;
/* Free the temporary allocated context. */
dplane_ctx_fini(&ctx);
- thread_add_timer(zrouter.master, fpm_rib_send,
- fnc, 1, &fnc->t_ribwalk);
+ event_add_timer(zrouter.master, fpm_rib_send,
+ fnc, 1, &fnc->t_ribwalk);
return;
}
WALK_FINISH(fnc, FNE_RIB_FINISHED);
/* Schedule next event: RMAC reset. */
- thread_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
- &fnc->t_rmacreset);
+ event_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
+ &fnc->t_rmacreset);
}
/*
zrmac->fwd_info.r_vtep_ip, sticky, 0 /*nhg*/,
0 /*update_flags*/);
if (fpm_nl_enqueue(fra->fnc, fra->ctx) == -1) {
- thread_add_timer(zrouter.master, fpm_rmac_send,
- fra->fnc, 1, &fra->fnc->t_rmacwalk);
+ event_add_timer(zrouter.master, fpm_rmac_send, fra->fnc, 1,
+ &fra->fnc->t_rmacwalk);
fra->complete = false;
}
}
hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni);
}
-static void fpm_rmac_send(struct thread *t)
+static void fpm_rmac_send(struct event *t)
{
struct fpm_rmac_arg fra;
- fra.fnc = THREAD_ARG(t);
+ fra.fnc = EVENT_ARG(t);
fra.ctx = dplane_ctx_alloc();
fra.complete = true;
hash_iterate(zrouter.l3vni_table, fpm_enqueue_l3vni_table, &fra);
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_FPM);
}
-static void fpm_nhg_reset(struct thread *t)
+static void fpm_nhg_reset(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
hash_iterate(zrouter.nhgs_id, fpm_nhg_reset_cb, NULL);
/* Schedule next step: send next hop groups. */
- thread_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
+ event_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
}
/*
UNSET_FLAG(lsp->flags, LSP_FLAG_FPM);
}
-static void fpm_lsp_reset(struct thread *t)
+static void fpm_lsp_reset(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
- struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
hash_iterate(zvrf->lsp_table, fpm_lsp_reset_cb, NULL);
/* Schedule next step: send LSPs */
- thread_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
+ event_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
}
/**
* Resets the RIB FPM flags so we send all routes again.
*/
-static void fpm_rib_reset(struct thread *t)
+static void fpm_rib_reset(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
rib_dest_t *dest;
struct route_node *rn;
struct route_table *rt;
}
/* Schedule next step: send RIB routes. */
- thread_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
+ event_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
}
/*
hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni);
}
-static void fpm_rmac_reset(struct thread *t)
+static void fpm_rmac_reset(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL);
/* Schedule next event: send RMAC entries. */
- thread_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
- &fnc->t_rmacwalk);
+ event_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
+ &fnc->t_rmacwalk);
}
-static void fpm_process_queue(struct thread *t)
+static void fpm_process_queue(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
struct zebra_dplane_ctx *ctx;
bool no_bufs = false;
uint64_t processed_contexts = 0;
/* Re-schedule if we ran out of buffer space */
if (no_bufs)
- thread_add_timer(fnc->fthread->master, fpm_process_queue,
- fnc, 0, &fnc->t_dequeue);
+ event_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0,
+ &fnc->t_dequeue);
/*
* Let the dataplane thread know if there are items in the
/**
* Handles external (e.g. CLI, data plane or others) events.
*/
-static void fpm_process_event(struct thread *t)
+static void fpm_process_event(struct event *t)
{
- struct fpm_nl_ctx *fnc = THREAD_ARG(t);
- enum fpm_nl_events event = THREAD_VAL(t);
+ struct fpm_nl_ctx *fnc = EVENT_ARG(t);
+ enum fpm_nl_events event = EVENT_VAL(t);
switch (event) {
case FNE_DISABLE:
static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
{
/* Disable all events and close socket. */
- THREAD_OFF(fnc->t_lspreset);
- THREAD_OFF(fnc->t_lspwalk);
- THREAD_OFF(fnc->t_nhgreset);
- THREAD_OFF(fnc->t_nhgwalk);
- THREAD_OFF(fnc->t_ribreset);
- THREAD_OFF(fnc->t_ribwalk);
- THREAD_OFF(fnc->t_rmacreset);
- THREAD_OFF(fnc->t_rmacwalk);
- THREAD_OFF(fnc->t_event);
- THREAD_OFF(fnc->t_nhg);
- thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
- thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
- thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
+ EVENT_OFF(fnc->t_lspreset);
+ EVENT_OFF(fnc->t_lspwalk);
+ EVENT_OFF(fnc->t_nhgreset);
+ EVENT_OFF(fnc->t_nhgwalk);
+ EVENT_OFF(fnc->t_ribreset);
+ EVENT_OFF(fnc->t_ribwalk);
+ EVENT_OFF(fnc->t_rmacreset);
+ EVENT_OFF(fnc->t_rmacwalk);
+ EVENT_OFF(fnc->t_event);
+ EVENT_OFF(fnc->t_nhg);
+ event_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
+ event_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
+ event_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
if (fnc->socket != -1) {
close(fnc->socket);
if (atomic_load_explicit(&fnc->counters.ctxqueue_len,
memory_order_relaxed)
> 0)
- thread_add_timer(fnc->fthread->master, fpm_process_queue,
- fnc, 0, &fnc->t_dequeue);
+ event_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0,
+ &fnc->t_dequeue);
/* Ensure dataplane thread is rescheduled if we hit the work limit */
if (counter >= limit)
return 0;
}
-static int fpm_nl_new(struct thread_master *tm)
+static int fpm_nl_new(struct event_loop *tm)
{
struct zebra_dplane_provider *prov = NULL;
int rv;
#include "table.h"
#include "memory.h"
#include "rib.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "nexthop.h"
#include "vrf.h"
static void if_down_del_nbr_connected(struct interface *ifp);
-static void if_zebra_speed_update(struct thread *thread)
+static void if_zebra_speed_update(struct event *thread)
{
- struct interface *ifp = THREAD_ARG(thread);
+ struct interface *ifp = EVENT_ARG(thread);
struct zebra_if *zif = ifp->info;
uint32_t new_speed;
bool changed = false;
return;
zif->speed_update_count++;
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp,
- SPEED_UPDATE_SLEEP_TIME, &zif->speed_update);
- thread_ignore_late_timer(zif->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp,
+ SPEED_UPDATE_SLEEP_TIME, &zif->speed_update);
+ event_ignore_late_timer(zif->speed_update);
}
}
* down upon startup.
*/
zebra_if->speed_update_count = 0;
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp, 15,
- &zebra_if->speed_update);
- thread_ignore_late_timer(zebra_if->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp, 15,
+ &zebra_if->speed_update);
+ event_ignore_late_timer(zebra_if->speed_update);
return 0;
}
XFREE(MTYPE_ZIF_DESC, zebra_if->desc);
- THREAD_OFF(zebra_if->speed_update);
+ EVENT_OFF(zebra_if->speed_update);
XFREE(MTYPE_ZINFO, zebra_if);
}
if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK)
zebra_evpn_mh_uplink_oper_update(zif);
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp, 0,
- &zif->speed_update);
- thread_ignore_late_timer(zif->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp, 0,
+ &zif->speed_update);
+ event_ignore_late_timer(zif->speed_update);
}
/* Interface goes down. We have to manage different behavior of based
struct interface *link;
uint8_t speed_update_count;
- struct thread *speed_update;
+ struct event *speed_update;
/*
* Does this interface have a v6 to v4 ll neighbor entry
#define IF_SHUTDOWN (1<<6)
struct interface *ifp;
- struct thread *t_advertise;
+ struct event *t_advertise;
unsigned long irdp_sent;
uint16_t Lifetime;
extern void irdp_if_init(void);
extern int irdp_sock_init(void);
extern int irdp_config_write(struct vty *, struct interface *);
-extern void irdp_send_thread(struct thread *t_advert);
+extern void irdp_send_thread(struct event *t_advert);
extern void irdp_advert_off(struct interface *ifp);
extern void process_solicit(struct interface *ifp);
-extern void irdp_read_raw(struct thread *r);
+extern void irdp_read_raw(struct event *r);
extern void send_packet(struct interface *ifp, struct stream *s, uint32_t dst,
struct prefix *p, uint32_t ttl);
#include "connected.h"
#include "log.h"
#include "zclient.h"
-#include "thread.h"
+#include "frrevent.h"
#include "lib_errors.h"
#include "zebra/interface.h"
#include "zebra/rtadv.h"
timer);
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
static void irdp_if_stop(struct interface *ifp)
#include "connected.h"
#include "log.h"
#include "zclient.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "libfrr.h"
#include "lib_errors.h"
extern struct zebra_privs_t zserv_privs;
-struct thread *t_irdp_raw;
+struct event *t_irdp_raw;
/* Timer interval of irdp. */
int irdp_timer_interval = IRDP_DEFAULT_INTERVAL;
return ret;
};
- thread_add_read(zrouter.master, irdp_read_raw, NULL, sock, &t_irdp_raw);
+ event_add_read(zrouter.master, irdp_read_raw, NULL, sock, &t_irdp_raw);
return sock;
}
stream_free(s);
}
-void irdp_send_thread(struct thread *t_advert)
+void irdp_send_thread(struct event *t_advert)
{
uint32_t timer, tmp;
- struct interface *ifp = THREAD_ARG(t_advert);
+ struct interface *ifp = EVENT_ARG(t_advert);
struct zebra_if *zi = ifp->info;
struct irdp_interface *irdp = zi->irdp;
struct prefix *p;
timer);
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
void irdp_advert_off(struct interface *ifp)
if (!irdp)
return;
- THREAD_OFF(irdp->t_advertise);
+ EVENT_OFF(irdp->t_advertise);
if (ifp->connected)
for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, ifc)) {
return;
irdp->flags |= IF_SOLICIT;
- THREAD_OFF(irdp->t_advertise);
+ EVENT_OFF(irdp->t_advertise);
timer = (frr_weak_random() % MAX_RESPONSE_DELAY) + 1;
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
static int irdp_finish(void)
return 0;
}
-static int irdp_init(struct thread_master *master)
+static int irdp_init(struct event_loop *master)
{
irdp_if_init();
#include "sockunion.h"
#include "sockunion.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vty.h"
#include "zclient.h"
#include "lib_errors.h"
int irdp_sock = -1;
-extern struct thread *t_irdp_raw;
+extern struct event *t_irdp_raw;
static void parse_irdp_packet(char *p, int len, struct interface *ifp)
{
return ret;
}
-void irdp_read_raw(struct thread *r)
+void irdp_read_raw(struct event *r)
{
struct interface *ifp;
struct zebra_if *zi;
char buf[IRDP_RX_BUF];
int ret, ifindex = 0;
- int irdp_sock = THREAD_FD(r);
- thread_add_read(zrouter.master, irdp_read_raw, NULL, irdp_sock,
- &t_irdp_raw);
+ int irdp_sock = EVENT_FD(r);
+ event_add_read(zrouter.master, irdp_read_raw, NULL, irdp_sock,
+ &t_irdp_raw);
ret = irdp_recvmsg(irdp_sock, (uint8_t *)buf, IRDP_RX_BUF, &ifindex);
#include "table.h"
#include "memory.h"
#include "rib.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "nexthop.h"
#include "vrf.h"
{RTN_XRESOLVE, "resolver"},
{0}};
-extern struct thread_master *master;
+extern struct event_loop *master;
extern struct zebra_privs_t zserv_privs;
return 0;
}
-static void kernel_read(struct thread *thread)
+static void kernel_read(struct event *thread)
{
- struct zebra_ns *zns = (struct zebra_ns *)THREAD_ARG(thread);
+ struct zebra_ns *zns = (struct zebra_ns *)EVENT_ARG(thread);
struct zebra_dplane_info dp_info;
/* Capture key info from ns struct */
netlink_parse_info(netlink_information_fetch, &zns->netlink, &dp_info,
5, false);
- thread_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
- &zns->t_netlink);
+ event_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
+ &zns->t_netlink);
}
/*
zns->t_netlink = NULL;
- thread_add_read(zrouter.master, kernel_read, zns,
- zns->netlink.sock, &zns->t_netlink);
+ event_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
+ &zns->t_netlink);
rt_netlink_init();
}
void kernel_terminate(struct zebra_ns *zns, bool complete)
{
- THREAD_OFF(zns->t_netlink);
+ EVENT_OFF(zns->t_netlink);
kernel_nlsock_fini(&zns->netlink);
}
-#include "thread.h"
+#include "frrevent.h"
#include "zebra/zserv.h"
/* For debug purpose. */
#endif /* RTAX_MAX */
/* Kernel routing table and interface updates via routing socket. */
-static void kernel_read(struct thread *thread)
+static void kernel_read(struct event *thread)
{
int sock;
int nbytes;
} buf;
/* Fetch routing socket. */
- sock = THREAD_FD(thread);
+ sock = EVENT_FD(thread);
nbytes = read(sock, &buf, sizeof(buf));
* shortage and is not harmful for consistency of
* reading the routing socket. Ignore it.
*/
- thread_add_read(zrouter.master, kernel_read, NULL, sock,
- NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, sock,
+ NULL);
return;
#else
flog_err(EC_ZEBRA_RECVMSG_OVERRUN,
if (nbytes == 0)
return;
- thread_add_read(zrouter.master, kernel_read, NULL, sock, NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, sock, NULL);
if (IS_ZEBRA_DEBUG_KERNEL)
rtmsg_debug(&buf.r.rtm);
}
/* kernel_read needs rewrite. */
- thread_add_read(zrouter.master, kernel_read, NULL, routing_sock, NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, routing_sock, NULL);
}
/* Exported interface function. This function simply calls
#include <stdint.h>
#include "lib/linklist.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/hook.h"
#include "zebra/zserv.h"
#include <lib/version.h>
#include "getopt.h"
#include "command.h"
-#include "thread.h"
+#include "frrevent.h"
#include "filter.h"
#include "memory.h"
#include "prefix.h"
pid_t pid;
/* Pacify zclient.o in libfrr, which expects this variable. */
-struct thread_master *master;
+struct event_loop *master;
/* Route retain mode flag. */
int retain_mode = 0;
* Final shutdown step for the zebra main thread. This is run after all
* async update processing has completed.
*/
-void zebra_finalize(struct thread *dummy)
+void zebra_finalize(struct event *dummy)
{
zlog_info("Zebra final shutdown");
* we have to have route_read() called before.
*/
zrouter.startup_time = monotime(NULL);
- thread_add_timer(zrouter.master, rib_sweep_route, NULL,
- graceful_restart, &zrouter.sweeper);
+ event_add_timer(zrouter.master, rib_sweep_route, NULL, graceful_restart,
+ &zrouter.sweeper);
/* Needed for BSD routing socket. */
pid = getpid();
return false;
afi = family2afi(rn->p.family);
- zvrf = vrf_info_lookup(re->vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table)
return false;
extern void rib_update(enum rib_update_event event);
extern void rib_update_table(struct route_table *table,
enum rib_update_event event, int rtype);
-extern void rib_sweep_route(struct thread *t);
+extern void rib_sweep_route(struct event *t);
extern void rib_sweep_table(struct route_table *table);
extern void rib_close_table(struct route_table *table);
extern void rib_init(void);
argv_find(argv, argc, "NAME", &idx);
VRF_GET_ID(vrf_id, argv[idx]->arg, false);
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
router_id_set(AFI_IP, &rid, zvrf);
return CMD_SUCCESS;
argv_find(argv, argc, "NAME", &idx);
VRF_GET_ID(vrf_id, argv[idx]->arg, false);
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
router_id_set(AFI_IP6, &rid, zvrf);
return CMD_SUCCESS;
if (argv_find(argv, argc, "NAME", &idx))
VRF_GET_ID(vrf_id, argv[idx]->arg, false);
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
router_id_set(AFI_IP, &rid, zvrf);
return CMD_SUCCESS;
if (argv_find(argv, argc, "NAME", &idx))
VRF_GET_ID(vrf_id, argv[idx]->arg, false);
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
router_id_set(AFI_IP6, &rid, zvrf);
return CMD_SUCCESS;
vrf_name = argv[idx]->arg;
}
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
if (zvrf != NULL) {
if (is_ipv6) {
#include "table.h"
#include "memory.h"
#include "rib.h"
-#include "thread.h"
+#include "frrevent.h"
#include "privs.h"
#include "nexthop.h"
#include "vrf.h"
#include "memory.h"
#include "sockopt.h"
-#include "thread.h"
+#include "frrevent.h"
#include "if.h"
#include "stream.h"
#include "log.h"
zif->ra_sent++;
}
-static void rtadv_timer(struct thread *thread)
+static void rtadv_timer(struct event *thread)
{
- struct zebra_vrf *zvrf = THREAD_ARG(thread);
+ struct zebra_vrf *zvrf = EVENT_ARG(thread);
struct vrf *vrf;
struct interface *ifp;
struct zebra_if *zif;
return;
}
-static void rtadv_read(struct thread *thread)
+static void rtadv_read(struct event *thread)
{
int sock;
int len;
struct sockaddr_in6 from;
ifindex_t ifindex = 0;
int hoplimit = -1;
- struct zebra_vrf *zvrf = THREAD_ARG(thread);
+ struct zebra_vrf *zvrf = EVENT_ARG(thread);
- sock = THREAD_FD(thread);
+ sock = EVENT_FD(thread);
zvrf->rtadv.ra_read = NULL;
/* Register myself. */
switch (event) {
case RTADV_START:
- thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
- &rtadv->ra_read);
- thread_add_event(zrouter.master, rtadv_timer, zvrf, 0,
- &rtadv->ra_timer);
+ event_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
+ &rtadv->ra_read);
+ event_add_event(zrouter.master, rtadv_timer, zvrf, 0,
+ &rtadv->ra_timer);
break;
case RTADV_STOP:
- THREAD_OFF(rtadv->ra_timer);
- THREAD_OFF(rtadv->ra_read);
+ EVENT_OFF(rtadv->ra_timer);
+ EVENT_OFF(rtadv->ra_read);
break;
case RTADV_TIMER:
- thread_add_timer(zrouter.master, rtadv_timer, zvrf, val,
- &rtadv->ra_timer);
+ event_add_timer(zrouter.master, rtadv_timer, zvrf, val,
+ &rtadv->ra_timer);
break;
case RTADV_TIMER_MSEC:
- thread_add_timer_msec(zrouter.master, rtadv_timer, zvrf, val,
- &rtadv->ra_timer);
+ event_add_timer_msec(zrouter.master, rtadv_timer, zvrf, val,
+ &rtadv->ra_timer);
break;
case RTADV_READ:
- thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
- &rtadv->ra_read);
+ event_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
+ &rtadv->ra_read);
break;
default:
break;
struct adv_if_list_head adv_if;
struct adv_if_list_head adv_msec_if;
- struct thread *ra_read;
- struct thread *ra_timer;
+ struct event *ra_read;
+ struct event *ra_timer;
};
PREDECL_RBTREE_UNIQ(rtadv_prefixes);
* Init entry point called during zebra startup. This is registered during
* module init.
*/
-static int init_sample_plugin(struct thread_master *tm)
+static int init_sample_plugin(struct event_loop *tm)
{
int ret;
#include <stdint.h>
#include "lib/linklist.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/ns.h"
#include "zebra/zserv.h"
uint32_t label_index = MPLS_INVALID_LABEL_INDEX;
s = msg;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return;
uint16_t flags;
s = msg;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return;
struct zebra_vrf *zvrf;
struct zserv *client;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) {
/* Do not send unsolicited messages to synchronous clients. */
if (client->synchronous)
struct zebra_dplane_info info;
/* Request data from the OS */
- struct thread *t_request;
+ struct event *t_request;
/* Read event */
- struct thread *t_read;
+ struct event *t_read;
/* List linkage */
struct zns_info_list_item link;
struct frr_pthread *dg_pthread;
/* Event-delivery context 'master' for the dplane */
- struct thread_master *dg_master;
+ struct event_loop *dg_master;
/* Event/'thread' pointer for queued updates */
- struct thread *dg_t_update;
+ struct event *dg_t_update;
/* Event pointer for pending shutdown check loop */
- struct thread *dg_t_shutdown_check;
+ struct event *dg_t_shutdown_check;
} zdplane_info;
#define DPLANE_PROV_UNLOCK(p) pthread_mutex_unlock(&((p)->dp_mutex))
/* Prototypes */
-static void dplane_thread_loop(struct thread *event);
+static void dplane_thread_loop(struct event *event);
static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
enum dplane_op_e op);
static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
*/
/* Obtain thread_master for dataplane thread */
-struct thread_master *dplane_get_thread_master(void)
+struct event_loop *dplane_get_thread_master(void)
{
return zdplane_info.dg_master;
}
* Callback when an OS (netlink) incoming event read is ready. This runs
* in the dplane pthread.
*/
-static void dplane_incoming_read(struct thread *event)
+static void dplane_incoming_read(struct event *event)
{
- struct dplane_zns_info *zi = THREAD_ARG(event);
+ struct dplane_zns_info *zi = EVENT_ARG(event);
kernel_dplane_read(&zi->info);
/* Re-start read task */
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
- zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
}
/*
* Callback in the dataplane pthread that requests info from the OS and
* initiates netlink reads.
*/
-static void dplane_incoming_request(struct thread *event)
+static void dplane_incoming_request(struct event *event)
{
- struct dplane_zns_info *zi = THREAD_ARG(event);
+ struct dplane_zns_info *zi = EVENT_ARG(event);
/* Start read task */
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
- zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
/* Send requests */
netlink_request_netconf(zi->info.sock);
* pthread is running, we'll initiate this later on.
*/
if (zdplane_info.dg_master)
- thread_add_event(zdplane_info.dg_master,
- dplane_incoming_request, zi, 0,
- &zi->t_request);
+ event_add_event(zdplane_info.dg_master, dplane_incoming_request,
+ zi, 0, &zi->t_request);
}
#endif /* HAVE_NETLINK */
/* Stop any outstanding tasks */
if (zdplane_info.dg_master) {
- thread_cancel_async(zdplane_info.dg_master,
- &zi->t_request, NULL);
+ event_cancel_async(zdplane_info.dg_master,
+ &zi->t_request, NULL);
- thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
- NULL);
+ event_cancel_async(zdplane_info.dg_master, &zi->t_read,
+ NULL);
}
XFREE(MTYPE_DP_NS, zi);
* available.
*/
if (zdplane_info.dg_run) {
- thread_add_event(zdplane_info.dg_master,
- dplane_thread_loop, NULL, 0,
- &zdplane_info.dg_t_update);
+ event_add_event(zdplane_info.dg_master, dplane_thread_loop,
+ NULL, 0, &zdplane_info.dg_t_update);
}
return AOK;
* final zebra shutdown.
* This runs in the dplane pthread context.
*/
-static void dplane_check_shutdown_status(struct thread *event)
+static void dplane_check_shutdown_status(struct event *event)
{
struct dplane_zns_info *zi;
zns_info_list_del(&zdplane_info.dg_zns_list, zi);
if (zdplane_info.dg_master) {
- THREAD_OFF(zi->t_read);
- THREAD_OFF(zi->t_request);
+ EVENT_OFF(zi->t_read);
+ EVENT_OFF(zi->t_request);
}
XFREE(MTYPE_DP_NS, zi);
if (dplane_work_pending()) {
/* Reschedule dplane check on a short timer */
- thread_add_timer_msec(zdplane_info.dg_master,
- dplane_check_shutdown_status,
- NULL, 100,
- &zdplane_info.dg_t_shutdown_check);
+ event_add_timer_msec(zdplane_info.dg_master,
+ dplane_check_shutdown_status, NULL, 100,
+ &zdplane_info.dg_t_shutdown_check);
/* TODO - give up and stop waiting after a short time? */
/* We appear to be done - schedule a final callback event
* for the zebra main pthread.
*/
- thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
+ event_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
}
}
if (IS_ZEBRA_DEBUG_DPLANE)
zlog_debug("Zebra dataplane fini called");
- thread_add_event(zdplane_info.dg_master,
- dplane_check_shutdown_status, NULL, 0,
- &zdplane_info.dg_t_shutdown_check);
+ event_add_event(zdplane_info.dg_master, dplane_check_shutdown_status,
+ NULL, 0, &zdplane_info.dg_t_shutdown_check);
}
/*
* pthread can look for other pending work - such as i/o work on behalf of
* providers.
*/
-static void dplane_thread_loop(struct thread *event)
+static void dplane_thread_loop(struct event *event)
{
struct dplane_ctx_list_head work_list;
struct dplane_ctx_list_head error_list;
zdplane_info.dg_run = false;
if (zdplane_info.dg_t_update)
- thread_cancel_async(zdplane_info.dg_t_update->master,
- &zdplane_info.dg_t_update, NULL);
+ event_cancel_async(zdplane_info.dg_t_update->master,
+ &zdplane_info.dg_t_update, NULL);
frr_pthread_stop(zdplane_info.dg_pthread, NULL);
zdplane_info.dg_run = true;
/* Enqueue an initial event for the dataplane pthread */
- thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
- &zdplane_info.dg_t_update);
+ event_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
+ &zdplane_info.dg_t_update);
/* Enqueue requests and reads if necessary */
frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
#if defined(HAVE_NETLINK)
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
- zi, zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
dplane_kernel_info_request(zi);
#endif
}
void dplane_provider_unlock(struct zebra_dplane_provider *prov);
/* Obtain thread_master for dataplane thread */
-struct thread_master *dplane_get_thread_master(void);
+struct event_loop *dplane_get_thread_master(void);
/* Providers should (generally) limit number of updates per work cycle */
int dplane_provider_get_work_limit(const struct zebra_dplane_provider *prov);
return buf;
}
-static void zebra_evpn_dad_mac_auto_recovery_exp(struct thread *t)
+static void zebra_evpn_dad_mac_auto_recovery_exp(struct event *t)
{
struct zebra_vrf *zvrf = NULL;
struct zebra_mac *mac = NULL;
struct listnode *node = NULL;
struct zebra_neigh *nbr = NULL;
- mac = THREAD_ARG(t);
+ mac = EVENT_ARG(t);
/* since this is asynchronous we need sanity checks*/
- zvrf = vrf_info_lookup(mac->zevpn->vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(mac->zevpn->vrf_id);
if (!zvrf)
return;
}
/* Start auto recovery timer for this MAC */
- THREAD_OFF(mac->dad_mac_auto_recovery_timer);
+ EVENT_OFF(mac->dad_mac_auto_recovery_timer);
if (zvrf->dad_freeze && zvrf->dad_freeze_time) {
if (IS_ZEBRA_DEBUG_VXLAN) {
char mac_buf[MAC_BUF_SIZE];
zvrf->dad_freeze_time);
}
- thread_add_timer(zrouter.master,
- zebra_evpn_dad_mac_auto_recovery_exp,
- mac, zvrf->dad_freeze_time,
- &mac->dad_mac_auto_recovery_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_dad_mac_auto_recovery_exp,
+ mac, zvrf->dad_freeze_time,
+ &mac->dad_mac_auto_recovery_timer);
}
/* In case of local update, do not inform to client (BGPd),
struct zebra_vrf *zvrf;
struct timeval detect_start_time = {0, 0};
char timebuf[MONOTIME_STRLEN];
- char thread_buf[THREAD_TIMER_STRLEN];
+ char thread_buf[EVENT_TIMER_STRLEN];
time_t uptime;
char up_str[MONOTIME_STRLEN];
if (mac->hold_timer)
json_object_string_add(
json_mac, "peerActiveHold",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- mac->hold_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ mac->hold_timer));
if (mac->es)
json_object_string_add(json_mac, "esi",
mac->es->esi_str);
vty_out(vty, " peer-active");
if (mac->hold_timer)
vty_out(vty, " (ht: %s)",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- mac->hold_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ mac->hold_timer));
vty_out(vty, "\n");
vty_out(vty, " Local Seq: %u Remote Seq: %u\n", mac->loc_seq,
mac->rem_seq);
zebra_evpn_mac_stop_hold_timer(mac);
/* Cancel auto recovery */
- THREAD_OFF(mac->dad_mac_auto_recovery_timer);
+ EVENT_OFF(mac->dad_mac_auto_recovery_timer);
/* If the MAC is freed before the neigh we will end up
* with a stale pointer against the neigh.
* external neighmgr daemon to probe existing hosts to independently
* establish their presence on the ES.
*/
-static void zebra_evpn_mac_hold_exp_cb(struct thread *t)
+static void zebra_evpn_mac_hold_exp_cb(struct event *t)
{
struct zebra_mac *mac;
bool old_bgp_ready;
bool old_static;
bool new_static;
- mac = THREAD_ARG(t);
+ mac = EVENT_ARG(t);
/* the purpose of the hold timer is to age out the peer-active
* flag
*/
zebra_evpn_zebra_mac_flag_dump(mac, mac_buf,
sizeof(mac_buf)));
}
- thread_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac,
- zmh_info->mac_hold_time, &mac->hold_timer);
+ event_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac,
+ zmh_info->mac_hold_time, &mac->hold_timer);
}
void zebra_evpn_mac_stop_hold_timer(struct zebra_mac *mac)
sizeof(mac_buf)));
}
- THREAD_OFF(mac->hold_timer);
+ EVENT_OFF(mac->hold_timer);
}
void zebra_evpn_sync_mac_del(struct zebra_mac *mac)
/* Duplicate mac detection */
uint32_t dad_count;
- struct thread *dad_mac_auto_recovery_timer;
+ struct event *dad_mac_auto_recovery_timer;
struct timeval detect_start_time;
time_t dad_dup_detect_time;
/* used for ageing out the PEER_ACTIVE flag */
- struct thread *hold_timer;
+ struct event *hold_timer;
/* number of neigh entries (using this mac) that have
* ZEBRA_MAC_ES_PEER_ACTIVE or ZEBRA_NEIGH_ES_PEER_PROXY
zebra_evpn_acc_vl_adv_svi_mac_all();
}
-static void zebra_evpn_es_df_delay_exp_cb(struct thread *t)
+static void zebra_evpn_es_df_delay_exp_cb(struct event *t)
{
struct zebra_evpn_es *es;
- es = THREAD_ARG(t);
+ es = EVENT_ARG(t);
if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
zlog_debug("es %s df-delay expired", es->esi_str);
/* Start the DF delay timer on the local ES */
if (!es->df_delay_timer)
- thread_add_timer(zrouter.master, zebra_evpn_es_df_delay_exp_cb,
- es, ZEBRA_EVPN_MH_DF_DELAY_TIME,
- &es->df_delay_timer);
+ event_add_timer(zrouter.master, zebra_evpn_es_df_delay_exp_cb,
+ es, ZEBRA_EVPN_MH_DF_DELAY_TIME,
+ &es->df_delay_timer);
/* See if the local VTEP can function as DF on the ES */
if (!zebra_evpn_es_run_df_election(es, __func__)) {
es->flags &= ~(ZEBRA_EVPNES_LOCAL | ZEBRA_EVPNES_READY_FOR_BGP);
- THREAD_OFF(es->df_delay_timer);
+ EVENT_OFF(es->df_delay_timer);
/* clear EVPN protodown flags on the access port */
zebra_evpn_mh_clear_protodown_es(es);
char alg_buf[EVPN_DF_ALG_STR_LEN];
struct zebra_evpn_es_vtep *es_vtep;
struct listnode *node;
- char thread_buf[THREAD_TIMER_STRLEN];
+ char thread_buf[EVENT_TIMER_STRLEN];
if (json) {
json_object *json_vteps;
if (es->df_delay_timer)
json_object_string_add(
json, "dfDelayTimer",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- es->df_delay_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ es->df_delay_timer));
json_object_int_add(json, "nexthopGroup", es->nhg_id);
if (listcount(es->es_vtep_list)) {
json_vteps = json_object_new_array();
: "df");
if (es->df_delay_timer)
vty_out(vty, " DF delay: %s\n",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- es->df_delay_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ es->df_delay_timer));
vty_out(vty, " DF preference: %u\n", es->df_pref);
vty_out(vty, " Nexthop group: %u\n", es->nhg_id);
vty_out(vty, " VTEPs:\n");
void zebra_evpn_mh_json(json_object *json)
{
json_object *json_array;
- char thread_buf[THREAD_TIMER_STRLEN];
+ char thread_buf[EVENT_TIMER_STRLEN];
json_object_int_add(json, "macHoldtime", zmh_info->mac_hold_time);
json_object_int_add(json, "neighHoldtime", zmh_info->neigh_hold_time);
json_object_int_add(json, "startupDelay", zmh_info->startup_delay_time);
json_object_string_add(
json, "startupDelayTimer",
- thread_timer_to_hhmmss(thread_buf, sizeof(thread_buf),
- zmh_info->startup_delay_timer));
+ event_timer_to_hhmmss(thread_buf, sizeof(thread_buf),
+ zmh_info->startup_delay_timer));
json_object_int_add(json, "uplinkConfigCount",
zmh_info->uplink_cfg_cnt);
json_object_int_add(json, "uplinkActiveCount",
void zebra_evpn_mh_print(struct vty *vty)
{
char pd_buf[ZEBRA_PROTODOWN_RC_STR_LEN];
- char thread_buf[THREAD_TIMER_STRLEN];
+ char thread_buf[EVENT_TIMER_STRLEN];
vty_out(vty, "EVPN MH:\n");
vty_out(vty, " mac-holdtime: %ds, neigh-holdtime: %ds\n",
zmh_info->mac_hold_time, zmh_info->neigh_hold_time);
vty_out(vty, " startup-delay: %ds, start-delay-timer: %s\n",
zmh_info->startup_delay_time,
- thread_timer_to_hhmmss(thread_buf, sizeof(thread_buf),
- zmh_info->startup_delay_timer));
+ event_timer_to_hhmmss(thread_buf, sizeof(thread_buf),
+ zmh_info->startup_delay_timer));
vty_out(vty, " uplink-cfg-cnt: %u, uplink-active-cnt: %u\n",
zmh_info->uplink_cfg_cnt, zmh_info->uplink_oper_up_cnt);
if (zmh_info->protodown_rc)
new_protodown);
}
-static void zebra_evpn_mh_startup_delay_exp_cb(struct thread *t)
+static void zebra_evpn_mh_startup_delay_exp_cb(struct event *t)
{
if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
zlog_debug("startup-delay expired");
if (zmh_info->startup_delay_timer) {
if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
zlog_debug("startup-delay timer cancelled");
- THREAD_OFF(zmh_info->startup_delay_timer);
+ EVENT_OFF(zmh_info->startup_delay_timer);
}
if (zmh_info->startup_delay_time) {
zlog_debug(
"startup-delay timer started for %d sec on %s",
zmh_info->startup_delay_time, rc);
- thread_add_timer(zrouter.master,
- zebra_evpn_mh_startup_delay_exp_cb, NULL,
- zmh_info->startup_delay_time,
- &zmh_info->startup_delay_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_mh_startup_delay_exp_cb, NULL,
+ zmh_info->startup_delay_time,
+ &zmh_info->startup_delay_timer);
zebra_evpn_mh_update_protodown(
ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY, true /* set */);
} else {
* imported before running the DF election.
*/
#define ZEBRA_EVPN_MH_DF_DELAY_TIME 3 /* seconds */
- struct thread *df_delay_timer;
+ struct event *df_delay_timer;
};
RB_HEAD(zebra_es_rb_head, zebra_evpn_es);
RB_PROTOTYPE(zebra_es_rb_head, zebra_evpn_es, rb_node, zebra_es_rb_cmp);
*/
int startup_delay_time; /* seconds */
#define ZEBRA_EVPN_MH_STARTUP_DELAY_DEF (3 * 60)
- struct thread *startup_delay_timer;
+ struct event *startup_delay_timer;
/* Number of configured uplinks */
uint32_t uplink_cfg_cnt;
* external neighmgr daemon to probe existing hosts to independently
* establish their presence on the ES.
*/
-static void zebra_evpn_neigh_hold_exp_cb(struct thread *t)
+static void zebra_evpn_neigh_hold_exp_cb(struct event *t)
{
struct zebra_neigh *n;
bool old_bgp_ready;
bool old_n_static;
bool new_n_static;
- n = THREAD_ARG(t);
+ n = EVENT_ARG(t);
/* the purpose of the hold timer is to age out the peer-active
* flag
*/
if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH)
zlog_debug("sync-neigh vni %u ip %pIA mac %pEA 0x%x hold start",
n->zevpn->vni, &n->ip, &n->emac, n->flags);
- thread_add_timer(zrouter.master, zebra_evpn_neigh_hold_exp_cb, n,
- zmh_info->neigh_hold_time, &n->hold_timer);
+ event_add_timer(zrouter.master, zebra_evpn_neigh_hold_exp_cb, n,
+ zmh_info->neigh_hold_time, &n->hold_timer);
}
static void zebra_evpn_local_neigh_deref_mac(struct zebra_neigh *n,
listnode_delete(n->mac->neigh_list, n);
/* Cancel auto recovery */
- THREAD_OFF(n->dad_ip_auto_recovery_timer);
+ EVENT_OFF(n->dad_ip_auto_recovery_timer);
/* Cancel proxy hold timer */
zebra_evpn_neigh_stop_hold_timer(n);
return 0;
}
-static void zebra_evpn_dad_ip_auto_recovery_exp(struct thread *t)
+static void zebra_evpn_dad_ip_auto_recovery_exp(struct event *t)
{
struct zebra_vrf *zvrf = NULL;
struct zebra_neigh *nbr = NULL;
struct zebra_evpn *zevpn = NULL;
- nbr = THREAD_ARG(t);
+ nbr = EVENT_ARG(t);
/* since this is asynchronous we need sanity checks*/
zvrf = vrf_info_lookup(nbr->zevpn->vrf_id);
nbr->dad_dup_detect_time = monotime(NULL);
/* Start auto recovery timer for this IP */
- THREAD_OFF(nbr->dad_ip_auto_recovery_timer);
+ EVENT_OFF(nbr->dad_ip_auto_recovery_timer);
if (zvrf->dad_freeze && zvrf->dad_freeze_time) {
if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug(
__func__, &nbr->emac, &nbr->ip,
nbr->flags, zvrf->dad_freeze_time);
- thread_add_timer(zrouter.master,
- zebra_evpn_dad_ip_auto_recovery_exp,
- nbr, zvrf->dad_freeze_time,
- &nbr->dad_ip_auto_recovery_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_dad_ip_auto_recovery_exp,
+ nbr, zvrf->dad_freeze_time,
+ &nbr->dad_ip_auto_recovery_timer);
}
if (zvrf->dad_freeze)
*is_dup_detect = true;
nbr->detect_start_time.tv_sec = 0;
nbr->detect_start_time.tv_usec = 0;
nbr->dad_dup_detect_time = 0;
- THREAD_OFF(nbr->dad_ip_auto_recovery_timer);
+ EVENT_OFF(nbr->dad_ip_auto_recovery_timer);
if (CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_LOCAL)) {
zebra_evpn_neigh_send_add_to_client(zevpn->vni, &nbr->ip,
struct zebra_vrf *zvrf = NULL;
struct timeval detect_start_time = {0, 0};
char timebuf[MONOTIME_STRLEN];
- char thread_buf[THREAD_TIMER_STRLEN];
+ char thread_buf[EVENT_TIMER_STRLEN];
time_t uptime;
char up_str[MONOTIME_STRLEN];
}
if (n->hold_timer) {
vty_out(vty, " (ht: %s)",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- n->hold_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ n->hold_timer));
sync_info = true;
}
if (!sync_info)
if (n->hold_timer)
json_object_string_add(
json, "peerActiveHold",
- thread_timer_to_hhmmss(thread_buf,
- sizeof(thread_buf),
- n->hold_timer));
+ event_timer_to_hhmmss(thread_buf,
+ sizeof(thread_buf),
+ n->hold_timer));
}
if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_REMOTE)) {
if (n->mac->es) {
/* Duplicate ip detection */
uint32_t dad_count;
- struct thread *dad_ip_auto_recovery_timer;
+ struct event *dad_ip_auto_recovery_timer;
struct timeval detect_start_time;
time_t uptime;
/* used for ageing out the PEER_ACTIVE flag */
- struct thread *hold_timer;
+ struct event *hold_timer;
};
/*
if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH)
zlog_debug("sync-neigh vni %u ip %pIA mac %pEA 0x%x hold stop",
n->zevpn->vni, &n->ip, &n->emac, n->flags);
- THREAD_OFF(n->hold_timer);
+ EVENT_OFF(n->hold_timer);
}
void zebra_evpn_sync_neigh_static_chg(struct zebra_neigh *n, bool old_n_static,
#include "log.h"
#include "libfrr.h"
#include "stream.h"
-#include "thread.h"
+#include "frrevent.h"
#include "network.h"
#include "command.h"
#include "lib/version.h"
*/
enum zfpm_msg_format message_format;
- struct thread_master *master;
+ struct event_loop *master;
enum zfpm_state state;
/*
* Threads for I/O.
*/
- struct thread *t_connect;
- struct thread *t_write;
- struct thread *t_read;
+ struct event *t_connect;
+ struct event *t_write;
+ struct event *t_read;
/*
* Thread to clean up after the TCP connection to the FPM goes down
* and the state that belongs to it.
*/
- struct thread *t_conn_down;
+ struct event *t_conn_down;
struct {
struct zfpm_rnodes_iter iter;
* Thread to take actions once the TCP conn to the FPM comes up, and
* the state that belongs to it.
*/
- struct thread *t_conn_up;
+ struct event *t_conn_up;
struct {
struct zfpm_rnodes_iter iter;
/*
* Stats interval timer.
*/
- struct thread *t_stats;
+ struct event *t_stats;
/*
* If non-zero, the last time when statistics were cleared.
static int zfpm_trigger_update(struct route_node *rn, const char *reason);
-static void zfpm_read_cb(struct thread *thread);
-static void zfpm_write_cb(struct thread *thread);
+static void zfpm_read_cb(struct event *thread);
+static void zfpm_write_cb(struct event *thread);
static void zfpm_set_state(enum zfpm_state state, const char *reason);
static void zfpm_start_connect_timer(const char *reason);
/*
* zfpm_thread_should_yield
*/
-static inline int zfpm_thread_should_yield(struct thread *t)
+static inline int zfpm_thread_should_yield(struct event *t)
{
- return thread_should_yield(t);
+ return event_should_yield(t);
}
/*
assert(!zfpm_g->t_read);
assert(zfpm_g->sock >= 0);
- thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
- &zfpm_g->t_read);
+ event_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
+ &zfpm_g->t_read);
}
/*
assert(!zfpm_g->t_write);
assert(zfpm_g->sock >= 0);
- thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
- &zfpm_g->t_write);
+ event_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
+ &zfpm_g->t_write);
}
/*
*/
static inline void zfpm_read_off(void)
{
- THREAD_OFF(zfpm_g->t_read);
+ EVENT_OFF(zfpm_g->t_read);
}
/*
*/
static inline void zfpm_write_off(void)
{
- THREAD_OFF(zfpm_g->t_write);
+ EVENT_OFF(zfpm_g->t_write);
}
static inline void zfpm_connect_off(void)
{
- THREAD_OFF(zfpm_g->t_connect);
+ EVENT_OFF(zfpm_g->t_connect);
}
/*
* Callback for actions to be taken when the connection to the FPM
* comes up.
*/
-static void zfpm_conn_up_thread_cb(struct thread *thread)
+static void zfpm_conn_up_thread_cb(struct event *thread)
{
struct route_node *rnode;
struct zfpm_rnodes_iter *iter;
zfpm_g->stats.t_conn_up_yields++;
zfpm_rnodes_iter_pause(iter);
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
- NULL, 0, &zfpm_g->t_conn_up);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
+ NULL, 0, &zfpm_g->t_conn_up);
return;
}
/*
* Start thread to push existing routes to the FPM.
*/
- THREAD_OFF(zfpm_g->t_conn_up);
+ EVENT_OFF(zfpm_g->t_conn_up);
zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
zfpm_g->fpm_mac_dump_done = false;
zfpm_debug("Starting conn_up thread");
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
- &zfpm_g->t_conn_up);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
+ &zfpm_g->t_conn_up);
zfpm_g->stats.t_conn_up_starts++;
}
* Callback that is invoked to clean up state after the TCP connection
* to the FPM goes down.
*/
-static void zfpm_conn_down_thread_cb(struct thread *thread)
+static void zfpm_conn_down_thread_cb(struct event *thread)
{
struct route_node *rnode;
struct zfpm_rnodes_iter *iter;
zfpm_g->stats.t_conn_down_yields++;
zfpm_rnodes_iter_pause(iter);
zfpm_g->t_conn_down = NULL;
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
- NULL, 0, &zfpm_g->t_conn_down);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
+ NULL, 0, &zfpm_g->t_conn_down);
return;
}
assert(!zfpm_g->t_conn_down);
zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
zfpm_g->t_conn_down = NULL;
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
- &zfpm_g->t_conn_down);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
+ &zfpm_g->t_conn_down);
zfpm_g->stats.t_conn_down_starts++;
zfpm_set_state(ZFPM_STATE_IDLE, detail);
/*
* zfpm_read_cb
*/
-static void zfpm_read_cb(struct thread *thread)
+static void zfpm_read_cb(struct event *thread)
{
size_t already;
struct stream *ibuf;
/*
* zfpm_write_cb
*/
-static void zfpm_write_cb(struct thread *thread)
+static void zfpm_write_cb(struct event *thread)
{
struct stream *s;
int num_writes;
/*
* zfpm_connect_cb
*/
-static void zfpm_connect_cb(struct thread *t)
+static void zfpm_connect_cb(struct event *t)
{
int sock, ret;
struct sockaddr_in serv;
delay_secs = zfpm_calc_connect_delay();
zfpm_debug("scheduling connect in %ld seconds", delay_secs);
- thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
- &zfpm_g->t_connect);
+ event_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
+ &zfpm_g->t_connect);
zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
}
/*
* struct zfpm_statsimer_cb
*/
-static void zfpm_stats_timer_cb(struct thread *t)
+static void zfpm_stats_timer_cb(struct event *t)
{
zfpm_g->t_stats = NULL;
return;
zfpm_debug("Stopping existing stats timer");
- THREAD_OFF(zfpm_g->t_stats);
+ EVENT_OFF(zfpm_g->t_stats);
}
/*
{
assert(!zfpm_g->t_stats);
- thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
- ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
+ event_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
+ ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
/*
*
* Returns true on success.
*/
-static int zfpm_init(struct thread_master *master)
+static int zfpm_init(struct event_loop *master)
{
int enable = 1;
uint16_t port = 0;
#include "lib/prefix.h"
#include "lib/command.h"
#include "lib/if.h"
-#include "lib/thread.h"
+#include "frrevent.h"
#include "lib/stream.h"
#include "lib/memory.h"
#include "lib/table.h"
* Forward declaration.
*/
static struct zserv *zebra_gr_find_stale_client(struct zserv *client);
-static void zebra_gr_route_stale_delete_timer_expiry(struct thread *thread);
+static void zebra_gr_route_stale_delete_timer_expiry(struct event *thread);
static int32_t zebra_gr_delete_stale_routes(struct client_gr_info *info);
static void zebra_gr_process_client_stale_routes(struct zserv *client,
vrf_id_t vrf_id);
/* Cancel the stale timer */
if (info->t_stale_removal != NULL) {
- THREAD_OFF(info->t_stale_removal);
+ EVENT_OFF(info->t_stale_removal);
info->t_stale_removal = NULL;
/* Process the stale routes */
- thread_execute(
- zrouter.master,
- zebra_gr_route_stale_delete_timer_expiry,
- info, 1);
+ event_execute(
+ zrouter.master,
+ zebra_gr_route_stale_delete_timer_expiry,
+ info, 1);
}
}
}
TAILQ_REMOVE(&(client->gr_info_queue), info, gr_info);
- THREAD_OFF(info->t_stale_removal);
+ EVENT_OFF(info->t_stale_removal);
XFREE(MTYPE_ZEBRA_GR, info->current_prefix);
&& (info->t_stale_removal == NULL)) {
struct vrf *vrf = vrf_lookup_by_id(info->vrf_id);
- thread_add_timer(
+ event_add_timer(
zrouter.master,
zebra_gr_route_stale_delete_timer_expiry, info,
info->stale_removal_time,
* Delete all the stale routes that have not been refreshed
* post restart.
*/
-static void zebra_gr_route_stale_delete_timer_expiry(struct thread *thread)
+static void zebra_gr_route_stale_delete_timer_expiry(struct event *thread)
{
- struct client_gr_info *info = THREAD_ARG(thread);
+ struct client_gr_info *info = EVENT_ARG(thread);
int32_t cnt = 0;
struct zserv *client;
struct vrf *vrf = vrf_lookup_by_id(info->vrf_id);
__func__, zebra_route_string(client->proto),
VRF_LOGNAME(vrf), info->vrf_id, cnt);
- thread_add_timer(zrouter.master,
- zebra_gr_route_stale_delete_timer_expiry, info,
- ZEBRA_DEFAULT_STALE_UPDATE_DELAY,
- &info->t_stale_removal);
+ event_add_timer(zrouter.master,
+ zebra_gr_route_stale_delete_timer_expiry, info,
+ ZEBRA_DEFAULT_STALE_UPDATE_DELAY,
+ &info->t_stale_removal);
} else {
/* No routes to delete for the VRF */
LOG_GR("%s: Client %s vrf %s(%u) all stale routes processed",
LOG_GR("%s: Client %s canceled stale delete timer vrf %s(%d)",
__func__, zebra_route_string(client->proto),
VRF_LOGNAME(vrf), info->vrf_id);
- THREAD_OFF(info->t_stale_removal);
- thread_execute(zrouter.master,
- zebra_gr_route_stale_delete_timer_expiry, info,
- 0);
+ EVENT_OFF(info->t_stale_removal);
+ event_execute(zrouter.master,
+ zebra_gr_route_stale_delete_timer_expiry, info,
+ 0);
}
}
static void zebra_l2_bridge_vlan_table_destroy(struct hash *vlan_table)
{
- if (vlan_table) {
- hash_clean(vlan_table, zebra_l2_bridge_vlan_free);
- hash_free(vlan_table);
- }
+ hash_clean_and_free(&vlan_table, zebra_l2_bridge_vlan_free);
}
static struct hash *zebra_l2_bridge_vlan_table_create(void)
static bool test_mlag_in_progress;
static int zebra_mlag_signal_write_thread(void);
-static void zebra_mlag_terminate_pthread(struct thread *event);
-static void zebra_mlag_post_data_from_main_thread(struct thread *thread);
+static void zebra_mlag_terminate_pthread(struct event *event);
+static void zebra_mlag_post_data_from_main_thread(struct event *thread);
static void zebra_mlag_publish_process_state(struct zserv *client,
zebra_message_types_t msg_type);
* additional four bytes are for message type
*/
stream_putl_at(s, 0, msg_type);
- thread_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
- s, 0, NULL);
+ event_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
+ s, 0, NULL);
}
/**********************End of MLAG Interaction********************************/
* This thread reads the clients data from the Global queue and encodes with
* protobuf and pass on to the MLAG socket.
*/
-static void zebra_mlag_client_msg_handler(struct thread *event)
+static void zebra_mlag_client_msg_handler(struct event *event)
{
struct stream *s;
uint32_t wr_count = 0;
* main thread.
*/
if (msg_type == MLAG_DEREGISTER) {
- thread_add_event(zrouter.master,
- zebra_mlag_terminate_pthread,
- NULL, 0, NULL);
+ event_add_event(zrouter.master,
+ zebra_mlag_terminate_pthread,
+ NULL, 0, NULL);
}
}
* during Zebra Init/after MLAG thread is destroyed.
* so it is safe to use without any locking
*/
- thread_add_event(zrouter.mlag_info.th_master,
- zebra_mlag_client_msg_handler, NULL, 0,
- &zrouter.mlag_info.t_write);
+ event_add_event(zrouter.mlag_info.th_master,
+ zebra_mlag_client_msg_handler, NULL, 0,
+ &zrouter.mlag_info.t_write);
return 0;
}
s = stream_new(ZEBRA_HEADER_SIZE + ZEBRA_MLAG_METADATA_LEN);
stream_putl(s, ZEBRA_MLAG_MSG_BCAST);
zclient_create_header(s, msg_type, VRF_DEFAULT);
- thread_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
- s, 0, NULL);
+ event_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
+ s, 0, NULL);
}
/**************************End of Multi-entrant Apis**************************/
* main thread, because for that access was needed for clients list.
* so instead of forcing the locks, messages will be posted from main thread.
*/
-static void zebra_mlag_post_data_from_main_thread(struct thread *thread)
+static void zebra_mlag_post_data_from_main_thread(struct event *thread)
{
- struct stream *s = THREAD_ARG(thread);
+ struct stream *s = EVENT_ARG(thread);
struct stream *zebra_s = NULL;
struct listnode *node;
struct zserv *client;
* all clients are un-registered for MLAG Updates, terminate the
* MLAG write thread
*/
-static void zebra_mlag_terminate_pthread(struct thread *event)
+static void zebra_mlag_terminate_pthread(struct event *event)
{
if (IS_ZEBRA_DEBUG_MLAG)
zlog_debug("Zebra MLAG write thread terminate called");
#include "hook.h"
#include "module.h"
-#include "thread.h"
+#include "frrevent.h"
#include "frr_pthread.h"
#include "libfrr.h"
#include "lib/version.h"
*
*/
-static struct thread_master *zmlag_master;
+static struct event_loop *zmlag_master;
static int mlag_socket;
-static void zebra_mlag_connect(struct thread *thread);
-static void zebra_mlag_read(struct thread *thread);
+static void zebra_mlag_connect(struct event *thread);
+static void zebra_mlag_read(struct event *thread);
/*
* Write the data to MLAGD
static void zebra_mlag_sched_read(void)
{
- thread_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
- &zrouter.mlag_info.t_read);
+ event_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
+ &zrouter.mlag_info.t_read);
}
-static void zebra_mlag_read(struct thread *thread)
+static void zebra_mlag_read(struct event *thread)
{
static uint32_t mlag_rd_buf_offset;
uint32_t *msglen;
zebra_mlag_sched_read();
}
-static void zebra_mlag_connect(struct thread *thread)
+static void zebra_mlag_connect(struct event *thread)
{
struct sockaddr_un svr = {0};
svr.sun_path);
close(mlag_socket);
zrouter.mlag_info.timer_running = true;
- thread_add_timer(zmlag_master, zebra_mlag_connect, NULL, 10,
- &zrouter.mlag_info.t_read);
+ event_add_timer(zmlag_master, zebra_mlag_connect, NULL, 10,
+ &zrouter.mlag_info.t_read);
return;
}
zlog_debug("%s: Connection with MLAG is established ",
__func__);
- thread_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
- &zrouter.mlag_info.t_read);
+ event_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
+ &zrouter.mlag_info.t_read);
/*
* Connection is established with MLAGD, post to clients
*/
*/
static int zebra_mlag_private_monitor_state(void)
{
- thread_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
- &zrouter.mlag_info.t_read);
+ event_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
+ &zrouter.mlag_info.t_read);
return 0;
}
/*
* Connect only if any clients are showing interest
*/
- thread_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
- &zrouter.mlag_info.t_read);
+ event_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
+ &zrouter.mlag_info.t_read);
}
return 0;
}
#include "log.h"
#include "sockunion.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "workqueue.h"
#include "prefix.h"
#include "routemap.h"
struct zebra_lsp *lsp;
struct zebra_nhlfe *oldbest, *newbest;
char buf[BUFSIZ], buf2[BUFSIZ];
- struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
enum zebra_dplane_result res;
lsp = (struct zebra_lsp *)data;
if (zebra_router_in_shutdown())
return;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
assert(zvrf);
lsp_table = zvrf->lsp_table;
case DPLANE_OP_LSP_INSTALL:
case DPLANE_OP_LSP_UPDATE:
/* Look for zebra LSP object */
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (zvrf == NULL)
break;
dplane_ctx_get_in_label(ctx));
/* Look for zebra LSP object */
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (zvrf == NULL)
return;
*/
static int zebra_mpls_cleanup_fecs_for_client(struct zserv *client)
{
- struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
struct route_node *rn;
struct zebra_fec *fec;
struct listnode *node;
void zebra_mpls_close_tables(struct zebra_vrf *zvrf)
{
hash_iterate(zvrf->lsp_table, lsp_uninstall_from_kernel, NULL);
- hash_clean(zvrf->lsp_table, lsp_table_free);
- hash_free(zvrf->lsp_table);
- hash_clean(zvrf->slsp_table, lsp_table_free);
- hash_free(zvrf->slsp_table);
+ hash_clean_and_free(&zvrf->lsp_table, lsp_table_free);
+ hash_clean_and_free(&zvrf->slsp_table, lsp_table_free);
route_table_finish(zvrf->fec_table[AFI_IP]);
route_table_finish(zvrf->fec_table[AFI_IP6]);
}
return CMD_WARNING_CONFIG_FAILED;
}
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf) {
vty_out(vty, "%% Default VRF does not exist\n");
return CMD_WARNING_CONFIG_FAILED;
uint32_t label;
int ret;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf) {
vty_out(vty, "%% Default VRF does not exist\n");
return CMD_WARNING_CONFIG_FAILED;
int write = 0;
struct zebra_vrf *zvrf;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return 0;
struct prefix p;
int ret;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return 0;
struct zebra_vrf *zvrf;
bool uj = use_json(argc, argv);
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
zebra_mpls_print_lsp_table(vty, zvrf, uj);
return CMD_SUCCESS;
}
struct zebra_vrf *zvrf;
bool uj = use_json(argc, argv);
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
label = atoi(argv[3]->arg);
zebra_mpls_print_lsp(vty, zvrf, label, uj);
return CMD_SUCCESS;
#include <sys/inotify.h>
#include <sys/stat.h>
-#include "thread.h"
+#include "frrevent.h"
#include "ns.h"
#include "command.h"
#include "memory.h"
#define ZEBRA_NS_POLLING_MAX_RETRIES 200
DEFINE_MTYPE_STATIC(ZEBRA, NETNS_MISC, "ZebraNetNSInfo");
-static struct thread *zebra_netns_notify_current;
+static struct event *zebra_netns_notify_current;
struct zebra_netns_info {
const char *netnspath;
unsigned int retries;
};
-static void zebra_ns_ready_read(struct thread *t);
+static void zebra_ns_ready_read(struct event *t);
static void zebra_ns_notify_create_context_from_entry_name(const char *name);
static int zebra_ns_continue_read(struct zebra_netns_info *zns_info,
int stop_retry);
-static void zebra_ns_notify_read(struct thread *t);
+static void zebra_ns_notify_read(struct event *t);
static struct vrf *vrf_handler_create(struct vty *vty, const char *vrfname)
{
XFREE(MTYPE_NETNS_MISC, zns_info);
return 0;
}
- thread_add_timer_msec(zrouter.master, zebra_ns_ready_read,
- (void *)zns_info, ZEBRA_NS_POLLING_INTERVAL_MSEC,
- NULL);
+ event_add_timer_msec(zrouter.master, zebra_ns_ready_read,
+ (void *)zns_info, ZEBRA_NS_POLLING_INTERVAL_MSEC,
+ NULL);
return 0;
}
return false;
}
-static void zebra_ns_ready_read(struct thread *t)
+static void zebra_ns_ready_read(struct event *t)
{
- struct zebra_netns_info *zns_info = THREAD_ARG(t);
+ struct zebra_netns_info *zns_info = EVENT_ARG(t);
const char *netnspath;
int err, stop_retry = 0;
zebra_ns_continue_read(zns_info, 1);
}
-static void zebra_ns_notify_read(struct thread *t)
+static void zebra_ns_notify_read(struct event *t)
{
- int fd_monitor = THREAD_FD(t);
+ int fd_monitor = EVENT_FD(t);
struct inotify_event *event;
char buf[BUFSIZ];
ssize_t len;
char event_name[NAME_MAX + 1];
- thread_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
- &zebra_netns_notify_current);
+ event_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
+ &zebra_netns_notify_current);
len = read(fd_monitor, buf, sizeof(buf));
if (len < 0) {
flog_err_sys(EC_ZEBRA_NS_NOTIFY_READ,
sizeof(struct zebra_netns_info));
netnsinfo->retries = ZEBRA_NS_POLLING_MAX_RETRIES;
netnsinfo->netnspath = netnspath;
- thread_add_timer_msec(zrouter.master, zebra_ns_ready_read,
- (void *)netnsinfo, 0, NULL);
+ event_add_timer_msec(zrouter.master, zebra_ns_ready_read,
+ (void *)netnsinfo, 0, NULL);
}
}
"NS notify watch: failed to add watch (%s)",
safe_strerror(errno));
}
- thread_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
- &zebra_netns_notify_current);
+ event_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
+ &zebra_netns_notify_current);
}
void zebra_ns_notify_close(void)
fd = zebra_netns_notify_current->u.fd;
if (zebra_netns_notify_current->master != NULL)
- THREAD_OFF(zebra_netns_notify_current);
+ EVENT_OFF(zebra_netns_notify_current);
/* auto-removal of notify items */
if (fd > 0)
nhe->nhg.nexthop);
}
- THREAD_OFF(nhe->timer);
+ EVENT_OFF(nhe->timer);
zebra_nhg_free_members(nhe);
nhe->nhg.nexthop);
}
- THREAD_OFF(nhe->timer);
+ EVENT_OFF(nhe->timer);
nexthops_free(nhe->nhg.nexthop);
}
}
-static void zebra_nhg_timer(struct thread *thread)
+static void zebra_nhg_timer(struct event *thread)
{
- struct nhg_hash_entry *nhe = THREAD_ARG(thread);
+ struct nhg_hash_entry *nhe = EVENT_ARG(thread);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("Nexthop Timer for nhe: %pNG", nhe);
!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
nhe->refcnt = 1;
SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
- thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
- zrouter.nhg_keep, &nhe->timer);
+ event_add_timer(zrouter.master, zebra_nhg_timer, nhe,
+ zrouter.nhg_keep, &nhe->timer);
return;
}
nhe->refcnt++;
- if (thread_is_scheduled(nhe->timer)) {
- THREAD_OFF(nhe->timer);
+ if (event_is_scheduled(nhe->timer)) {
+ EVENT_OFF(nhe->timer);
nhe->refcnt--;
UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
}
/* Dont call the dec API, we dont want to uninstall the ID */
old->refcnt = 0;
- THREAD_OFF(old->timer);
+ EVENT_OFF(old->timer);
zebra_nhg_free(old);
old = NULL;
}
*/
struct nhg_connected_tree_head nhg_depends, nhg_dependents;
- struct thread *timer;
+ struct event *timer;
/*
* Is this nexthop group valid, ie all nexthops are fully resolved.
*/
struct nlsock netlink_dplane_out;
struct nlsock netlink_dplane_in;
- struct thread *t_netlink;
+ struct event *t_netlink;
#endif
struct route_table *if_table;
struct frr_pthread *pthread;
/* Event-delivery context 'master' for the module */
- struct thread_master *master;
+ struct event_loop *master;
/* Event/'thread' pointer for queued zapi messages */
- struct thread *t_msgs;
+ struct event *t_msgs;
/* Input fifo queue to the module, and lock to protect it. */
pthread_mutex_t mutex;
/* Prototypes */
/* Main event loop, processing incoming message queue */
-static void process_messages(struct thread *event);
+static void process_messages(struct event *event);
static int handle_opq_registration(const struct zmsghdr *hdr,
struct stream *msg);
static int handle_opq_unregistration(const struct zmsghdr *hdr,
atomic_store_explicit(&zo_info.run, 1, memory_order_relaxed);
/* Enqueue an initial event for the pthread */
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
/* And start the pthread */
frr_pthread_run(zo_info.pthread, NULL);
if (IS_ZEBRA_DEBUG_RECV && IS_ZEBRA_DEBUG_DETAIL)
zlog_debug("%s: received %u messages",
__func__, counter);
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
}
return counter;
/*
* Pthread event loop, process the incoming message queue.
*/
-static void process_messages(struct thread *event)
+static void process_messages(struct event *event)
{
struct stream_fifo fifo;
struct stream *msg;
if (need_resched) {
atomic_fetch_add_explicit(&zo_info.yields, 1,
memory_order_relaxed);
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
}
/* This will also free any leftover messages, in the shutdown case */
struct zebra_ptm_cb ptm_cb;
static int zebra_ptm_socket_init(void);
-void zebra_ptm_sock_read(struct thread *thread);
+void zebra_ptm_sock_read(struct event *thread);
static void zebra_ptm_install_commands(void);
static int zebra_ptm_handle_msg_cb(void *arg, void *in_ctxt);
void zebra_bfd_peer_replay_req(void);
free(ptm_cb.in_data);
/* Cancel events. */
- THREAD_OFF(ptm_cb.t_read);
- THREAD_OFF(ptm_cb.t_write);
- THREAD_OFF(ptm_cb.t_timer);
+ EVENT_OFF(ptm_cb.t_read);
+ EVENT_OFF(ptm_cb.t_write);
+ EVENT_OFF(ptm_cb.t_timer);
if (ptm_cb.wb)
buffer_free(ptm_cb.wb);
close(ptm_cb.ptm_sock);
}
-static void zebra_ptm_flush_messages(struct thread *thread)
+static void zebra_ptm_flush_messages(struct event *thread)
{
ptm_cb.t_write = NULL;
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
case BUFFER_PENDING:
ptm_cb.t_write = NULL;
- thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_write);
+ event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_write);
break;
case BUFFER_EMPTY:
break;
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return -1;
case BUFFER_EMPTY:
- THREAD_OFF(ptm_cb.t_write);
+ EVENT_OFF(ptm_cb.t_write);
break;
case BUFFER_PENDING:
- thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_write);
+ event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_write);
break;
}
return 0;
}
-void zebra_ptm_connect(struct thread *t)
+void zebra_ptm_connect(struct event *t)
{
int init = 0;
if (ptm_cb.ptm_sock != -1) {
if (init) {
ptm_cb.t_read = NULL;
- thread_add_read(zrouter.master, zebra_ptm_sock_read,
- NULL, ptm_cb.ptm_sock, &ptm_cb.t_read);
+ event_add_read(zrouter.master, zebra_ptm_sock_read,
+ NULL, ptm_cb.ptm_sock, &ptm_cb.t_read);
zebra_bfd_peer_replay_req();
}
zebra_ptm_send_status_req();
ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_MAX;
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
} else if (ptm_cb.reconnect_time >= ZEBRA_PTM_RECONNECT_TIME_MAX) {
ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;
}
}
}
-void zebra_ptm_sock_read(struct thread *thread)
+void zebra_ptm_sock_read(struct event *thread)
{
int sock;
int rc;
errno = 0;
- sock = THREAD_FD(thread);
+ sock = EVENT_FD(thread);
if (sock == -1)
return;
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time,
- &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
ptm_cb.t_read = NULL;
- thread_add_read(zrouter.master, zebra_ptm_sock_read, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_read);
+ event_add_read(zrouter.master, zebra_ptm_sock_read, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_read);
}
/* BFD peer/dst register/update */
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return 0;
}
struct buffer *wb; /* Buffer of data waiting to be written to ptm. */
- struct thread *t_read; /* Thread for read */
- struct thread *t_write; /* Thread for write */
- struct thread *t_timer; /* Thread for timer */
+ struct event *t_read; /* Thread for read */
+ struct event *t_write; /* Thread for write */
+ struct event *t_timer; /* Thread for timer */
char *out_data;
char *in_data;
void zebra_ptm_init(void);
void zebra_ptm_finish(void);
-void zebra_ptm_connect(struct thread *t);
+void zebra_ptm_connect(struct event *t);
void zebra_ptm_write(struct vty *vty);
int zebra_ptm_get_enable_state(void);
#include "log.h"
#include "memory.h"
-#include "thread.h"
+#include "frrevent.h"
#include "command.h"
#include "vrf.h"
#include "lib/json.h"
static int zebra_pw_enabled(struct zebra_pw *);
static void zebra_pw_install(struct zebra_pw *);
static void zebra_pw_uninstall(struct zebra_pw *);
-static void zebra_pw_install_retry(struct thread *thread);
+static void zebra_pw_install_retry(struct event *thread);
static int zebra_pw_check_reachability(const struct zebra_pw *);
static void zebra_pw_update_status(struct zebra_pw *, int);
dplane_pw_uninstall(pw);
}
- THREAD_OFF(pw->install_retry_timer);
+ EVENT_OFF(pw->install_retry_timer);
/* unlink and release memory */
RB_REMOVE(zebra_pw_head, &zvrf->pseudowires, pw);
pw->vrf_id, pw->ifname, PW_INSTALL_RETRY_INTERVAL);
/* schedule to retry later */
- THREAD_OFF(pw->install_retry_timer);
- thread_add_timer(zrouter.master, zebra_pw_install_retry, pw,
- PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
+ EVENT_OFF(pw->install_retry_timer);
+ event_add_timer(zrouter.master, zebra_pw_install_retry, pw,
+ PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
zebra_pw_update_status(pw, pwstatus);
}
-static void zebra_pw_install_retry(struct thread *thread)
+static void zebra_pw_install_retry(struct event *thread)
{
- struct zebra_pw *pw = THREAD_ARG(thread);
+ struct zebra_pw *pw = EVENT_ARG(thread);
zebra_pw_install(pw);
}
const char *ifname;
int idx = 0;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return CMD_WARNING;
const char *ifname;
int idx = 0;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return CMD_WARNING;
struct zebra_vrf *zvrf;
struct zebra_pw *pw;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return 0;
struct nexthop *nexthop;
struct nexthop_group *nhg;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return;
struct zebra_vrf *zvrf;
struct zebra_pw *pw;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return;
struct zebra_vrf *zvrf;
struct zebra_pw *pw;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return 0;
uint8_t protocol;
struct zserv *client;
struct rnh *rnh;
- struct thread *install_retry_timer;
+ struct event *install_retry_timer;
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(zebra_pw);
#include "sockunion.h"
#include "srcdest_table.h"
#include "table.h"
-#include "thread.h"
+#include "frrevent.h"
#include "vrf.h"
#include "workqueue.h"
#include "nexthop_group_private.h"
* Event, list, and mutex for delivery of dataplane results
*/
static pthread_mutex_t dplane_mutex;
-static struct thread *t_dplane;
+static struct event *t_dplane;
static struct dplane_ctx_list_head rib_dplane_q;
DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
{
struct nexthop *nexthop;
struct rib_table_info *info = srcdest_rnode_table_info(rn);
- struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
const struct prefix *p, *src_p;
enum zebra_dplane_result ret;
{
struct nexthop *nexthop;
struct rib_table_info *info = srcdest_rnode_table_info(rn);
- struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
if (info->safi != SAFI_UNICAST) {
UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
static void zebra_rib_evaluate_mpls(struct route_node *rn)
{
rib_dest_t *dest = rib_dest_from_rnode(rn);
- struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
+ struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!dest)
return;
struct rib_table_info *info;
bool rt_delete = false;
- zvrf = vrf_info_lookup(dplane_ctx_get_vrf(ctx));
+ zvrf = zebra_vrf_lookup_by_id(dplane_ctx_get_vrf(ctx));
vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx));
/* Locate rn and re(s) from ctx */
if (!w)
return;
- zvrf = vrf_info_lookup(w->vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(w->vrf_id);
if (!zvrf) {
XFREE(MTYPE_WQ_WRAPPER, w);
return;
XFREE(MTYPE_RIB_UPDATE_CTX, *ctx);
}
-static void rib_update_handler(struct thread *thread)
+static void rib_update_handler(struct event *thread)
{
struct rib_update_ctx *ctx;
- ctx = THREAD_ARG(thread);
+ ctx = EVENT_ARG(thread);
rib_update_handle_vrf_all(ctx->event, ZEBRA_ROUTE_ALL);
* Thread list to ensure we don't schedule a ton of events
* if interfaces are flapping for instance.
*/
-static struct thread *t_rib_update_threads[RIB_UPDATE_MAX];
+static struct event *t_rib_update_threads[RIB_UPDATE_MAX];
/* Schedule a RIB update event for all vrfs */
void rib_update(enum rib_update_event event)
{
struct rib_update_ctx *ctx;
- if (thread_is_scheduled(t_rib_update_threads[event]))
+ if (event_is_scheduled(t_rib_update_threads[event]))
return;
ctx = rib_update_ctx_init(0, event);
- thread_add_event(zrouter.master, rib_update_handler, ctx, 0,
- &t_rib_update_threads[event]);
+ event_add_event(zrouter.master, rib_update_handler, ctx, 0,
+ &t_rib_update_threads[event]);
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug("%s: Scheduled VRF (ALL), event %s", __func__,
}
/* Sweep all RIB tables. */
-void rib_sweep_route(struct thread *t)
+void rib_sweep_route(struct event *t)
{
struct vrf *vrf;
struct zebra_vrf *zvrf;
* Handle results from the dataplane system. Dequeue update context
* structs, dispatch to appropriate internal handlers.
*/
-static void rib_process_dplane_results(struct thread *thread)
+static void rib_process_dplane_results(struct event *thread)
{
struct zebra_dplane_ctx *ctx;
struct dplane_ctx_list_head ctxlist;
}
/* Ensure event is signalled to zebra main pthread */
- thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
- &t_dplane);
+ event_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
+ &t_dplane);
return 0;
}
#include "log.h"
#include "sockunion.h"
#include "linklist.h"
-#include "thread.h"
+#include "frrevent.h"
#include "workqueue.h"
#include "prefix.h"
#include "routemap.h"
*nht_exists = false;
- zvrf = vrf_info_lookup(vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(vrf_id);
if (!zvrf)
return;
#include "zebra/zebra_routemap_clippy.c"
static uint32_t zebra_rmap_update_timer = ZEBRA_RMAP_DEFAULT_UPDATE_TIMER;
-static struct thread *zebra_t_rmap_update = NULL;
+static struct event *zebra_t_rmap_update = NULL;
char *zebra_import_table_routemap[AFI_MAX][ZEBRA_KERNEL_TABLE_MAX];
struct nh_rmap_obj {
zebra_nht_rm_update(rmap_name);
}
-static void zebra_route_map_update_timer(struct thread *thread)
+static void zebra_route_map_update_timer(struct event *thread)
{
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug("Event driven route-map update triggered");
if (!value && zebra_t_rmap_update) {
/* Event driven route map updates is being disabled */
/* But there's a pending timer. Fire it off now */
- THREAD_OFF(zebra_t_rmap_update);
+ EVENT_OFF(zebra_t_rmap_update);
zebra_route_map_update_timer(NULL);
}
}
/* Set zebra_rmap_update_timer to 0 so that it wont schedule again */
zebra_rmap_update_timer = 0;
/* Thread off if any scheduled already */
- THREAD_OFF(zebra_t_rmap_update);
+ EVENT_OFF(zebra_t_rmap_update);
route_map_finish();
}
{
/* rmap_update_timer of 0 means don't do route updates */
if (zebra_rmap_update_timer)
- THREAD_OFF(zebra_t_rmap_update);
+ EVENT_OFF(zebra_t_rmap_update);
- thread_add_timer(zrouter.master, zebra_route_map_update_timer,
- NULL, zebra_rmap_update_timer, &zebra_t_rmap_update);
+ event_add_timer(zrouter.master, zebra_route_map_update_timer, NULL,
+ zebra_rmap_update_timer, &zebra_t_rmap_update);
}
static void zebra_route_map_add(const char *rmap_name)
{
struct zebra_router_table *zrt, *tmp;
- THREAD_OFF(zrouter.sweeper);
+ EVENT_OFF(zrouter.sweeper);
RB_FOREACH_SAFE (zrt, zebra_router_table_head, &zrouter.tables, tmp)
zebra_router_free_table(zrt);
/* Free NHE in ID table only since it has unhashable entries as well */
hash_iterate(zrouter.nhgs_id, zebra_nhg_hash_free_zero_id, NULL);
- hash_clean(zrouter.nhgs_id, zebra_nhg_hash_free);
- hash_free(zrouter.nhgs_id);
- hash_clean(zrouter.nhgs, NULL);
- hash_free(zrouter.nhgs);
-
- hash_clean(zrouter.rules_hash, zebra_pbr_rules_free);
- hash_free(zrouter.rules_hash);
-
- hash_clean(zrouter.ipset_entry_hash, zebra_pbr_ipset_entry_free),
- hash_clean(zrouter.ipset_hash, zebra_pbr_ipset_free);
- hash_free(zrouter.ipset_hash);
- hash_free(zrouter.ipset_entry_hash);
- hash_clean(zrouter.iptable_hash, zebra_pbr_iptable_free);
- hash_free(zrouter.iptable_hash);
+ hash_clean_and_free(&zrouter.nhgs_id, zebra_nhg_hash_free);
+ hash_clean_and_free(&zrouter.nhgs, NULL);
+
+ hash_clean_and_free(&zrouter.rules_hash, zebra_pbr_rules_free);
+
+ hash_clean_and_free(&zrouter.ipset_entry_hash,
+ zebra_pbr_ipset_entry_free);
+ hash_clean_and_free(&zrouter.ipset_hash, zebra_pbr_ipset_free);
+ hash_clean_and_free(&zrouter.iptable_hash, zebra_pbr_iptable_free);
#ifdef HAVE_SCRIPTING
zebra_script_destroy();
struct frr_pthread *zebra_pth_mlag;
/* MLAG Thread context 'master' */
- struct thread_master *th_master;
+ struct event_loop *th_master;
/*
* Event for Initial MLAG Connection setup & Data Read
* so no issues.
*
*/
- struct thread *t_read;
+ struct event *t_read;
/* Event for MLAG write */
- struct thread *t_write;
+ struct event *t_write;
};
struct zebra_router {
atomic_bool in_shutdown;
/* Thread master */
- struct thread_master *master;
+ struct event_loop *master;
/* Lists of clients who have connected to us */
struct list *client_list;
* Time for when we sweep the rib from old routes
*/
time_t startup_time;
- struct thread *sweeper;
+ struct event *sweeper;
/*
* The hash of nexthop groups associated with this router
return NULL;
}
-static int zebra_snmp_init(struct thread_master *tm)
+static int zebra_snmp_init(struct event_loop *tm)
{
smux_init(tm);
REGISTER_MIB("mibII/ipforward", zebra_variables, variable, ipfw_oid);
if (re->mtu)
vty_out(vty, ", mtu %u", re->mtu);
if (re->vrf_id != VRF_DEFAULT) {
- zvrf = vrf_info_lookup(re->vrf_id);
+ zvrf = zebra_vrf_lookup_by_id(re->vrf_id);
vty_out(vty, ", vrf %s", zvrf_name(zvrf));
}
if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
json_object_string_add(json, "type",
zebra_route_string(nhe->type));
json_object_int_add(json, "refCount", nhe->refcnt);
- if (thread_is_scheduled(nhe->timer))
+ if (event_is_scheduled(nhe->timer))
json_object_string_add(
json, "timeToDeletion",
- thread_timer_to_hhmmss(time_left,
- sizeof(time_left),
- nhe->timer));
+ event_timer_to_hhmmss(time_left,
+ sizeof(time_left),
+ nhe->timer));
json_object_string_add(json, "uptime", up_str);
json_object_string_add(json, "vrf",
vrf_id_to_name(nhe->vrf_id));
vty_out(vty, "ID: %u (%s)\n", nhe->id,
zebra_route_string(nhe->type));
vty_out(vty, " RefCnt: %u", nhe->refcnt);
- if (thread_is_scheduled(nhe->timer))
+ if (event_is_scheduled(nhe->timer))
vty_out(vty, " Time to Deletion: %s",
- thread_timer_to_hhmmss(time_left,
- sizeof(time_left),
- nhe->timer));
+ event_timer_to_hhmmss(time_left,
+ sizeof(time_left),
+ nhe->timer));
vty_out(vty, "\n");
vty_out(vty, " Uptime: %s\n", up_str);
struct zebra_vrf *zvrf = NULL;
int filter = 0;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return CMD_WARNING;
vni_t vni = strtoul(argv[2]->arg, NULL, 10);
struct zebra_vrf *zvrf = NULL;
- zvrf = vrf_info_lookup(VRF_DEFAULT);
+ zvrf = zebra_vrf_lookup_by_id(VRF_DEFAULT);
if (!zvrf)
return CMD_WARNING;
mac->detect_start_time.tv_sec = 0;
mac->detect_start_time.tv_usec = 0;
mac->dad_dup_detect_time = 0;
- THREAD_OFF(mac->dad_mac_auto_recovery_timer);
+ EVENT_OFF(mac->dad_mac_auto_recovery_timer);
/* warn-only action return */
if (!zvrf->dad_freeze)
nbr->detect_start_time.tv_sec = 0;
nbr->detect_start_time.tv_usec = 0;
nbr->dad_dup_detect_time = 0;
- THREAD_OFF(nbr->dad_ip_auto_recovery_timer);
+ EVENT_OFF(nbr->dad_ip_auto_recovery_timer);
if (!!CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_LOCAL)) {
zebra_evpn_neigh_send_add_to_client(zevpn->vni, ip, &nbr->emac,
mac->detect_start_time.tv_sec = 0;
mac->detect_start_time.tv_usec = 0;
mac->dad_dup_detect_time = 0;
- THREAD_OFF(mac->dad_mac_auto_recovery_timer);
+ EVENT_OFF(mac->dad_mac_auto_recovery_timer);
/* Remove all IPs as duplicate associcated with this MAC */
for (ALL_LIST_ELEMENTS_RO(mac->neigh_list, node, nbr)) {
void zebra_vxlan_vni_table_destroy(struct hash *vni_table)
{
- if (vni_table) {
- hash_clean(vni_table, zebra_vxlan_vni_free);
- hash_free(vni_table);
- }
+ hash_clean_and_free(&vni_table, zebra_vxlan_vni_free);
}
int zebra_vxlan_if_vni_table_destroy(struct zebra_if *zif)
#include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */
#include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */
#include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */
-#include "lib/thread.h" /* for thread (ptr only), THREAD_ARG, ... */
+#include "frrevent.h" /* for event (ptr only), EVENT_ARG, ... */
#include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */
#include "lib/vty.h" /* for vty_out, vty (ptr only) */
#include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
/*
* Zebra server event driver for all client threads.
*
- * This is essentially a wrapper around thread_add_event() that centralizes
+ * This is essentially a wrapper around event_add_event() that centralizes
* those scheduling calls into one place.
*
* All calls to this function schedule an event on the pthread running the
/*
* Zebra server event driver for the main thread.
*
- * This is essentially a wrapper around thread_add_event() that centralizes
+ * This is essentially a wrapper around event_add_event() that centralizes
* those scheduling calls into one place.
*
* All calls to this function schedule an event on Zebra's main pthread.
atomic_store_explicit(&client->pthread->running, false,
memory_order_relaxed);
- THREAD_OFF(client->t_read);
- THREAD_OFF(client->t_write);
+ EVENT_OFF(client->t_read);
+ EVENT_OFF(client->t_write);
zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL);
}
* allows us to expose information about input and output queues to the user in
* terms of number of packets rather than size of data.
*/
-static void zserv_write(struct thread *thread)
+static void zserv_write(struct event *thread)
{
- struct zserv *client = THREAD_ARG(thread);
+ struct zserv *client = EVENT_ARG(thread);
struct stream *msg;
uint32_t wcmd = 0;
struct stream_fifo *cache;
*
* Any failure in any of these actions is handled by terminating the client.
*/
-static void zserv_read(struct thread *thread)
+static void zserv_read(struct event *thread)
{
- struct zserv *client = THREAD_ARG(thread);
+ struct zserv *client = EVENT_ARG(thread);
int sock;
size_t already;
struct stream_fifo *cache;
memory_order_relaxed);
cache = stream_fifo_new();
p2p = p2p_orig;
- sock = THREAD_FD(thread);
+ sock = EVENT_FD(thread);
while (p2p) {
ssize_t nb;
{
switch (event) {
case ZSERV_CLIENT_READ:
- thread_add_read(client->pthread->master, zserv_read, client,
- client->sock, &client->t_read);
+ event_add_read(client->pthread->master, zserv_read, client,
+ client->sock, &client->t_read);
break;
case ZSERV_CLIENT_WRITE:
- thread_add_write(client->pthread->master, zserv_write, client,
- client->sock, &client->t_write);
+ event_add_write(client->pthread->master, zserv_write, client,
+ client->sock, &client->t_write);
break;
}
}
* rely on the read thread to handle queuing this task enough times to process
* everything on the input queue.
*/
-static void zserv_process_messages(struct thread *thread)
+static void zserv_process_messages(struct event *thread)
{
- struct zserv *client = THREAD_ARG(thread);
+ struct zserv *client = EVENT_ARG(thread);
struct stream *msg;
struct stream_fifo *cache = stream_fifo_new();
uint32_t p2p = zrouter.packets_to_process;
* - Free associated resources
* - Free client structure
*
- * This does *not* take any action on the struct thread * fields. These are
+ * This does *not* take any action on the struct event * fields. These are
* managed by the owning pthread and any tasks associated with them must have
* been stopped prior to invoking this function.
*/
zlog_debug("Closing client '%s'",
zebra_route_string(client->proto));
- thread_cancel_event(zrouter.master, client);
- THREAD_OFF(client->t_cleanup);
- THREAD_OFF(client->t_process);
+ event_cancel_event(zrouter.master, client);
+ EVENT_OFF(client->t_cleanup);
+ EVENT_OFF(client->t_process);
/* destroy pthread */
frr_pthread_destroy(client->pthread);
* already have been closed and the thread will most likely have died, but its
* resources still need to be cleaned up.
*/
-static void zserv_handle_client_fail(struct thread *thread)
+static void zserv_handle_client_fail(struct event *thread)
{
- struct zserv *client = THREAD_ARG(thread);
+ struct zserv *client = EVENT_ARG(thread);
zserv_close_client(client);
}
* main pthread.
*/
if (client->is_closed)
- thread_add_event(zrouter.master,
- zserv_handle_client_fail,
- client, 0, &client->t_cleanup);
+ event_add_event(zrouter.master,
+ zserv_handle_client_fail,
+ client, 0, &client->t_cleanup);
}
}
/*
* Accept socket connection.
*/
-static void zserv_accept(struct thread *thread)
+static void zserv_accept(struct event *thread)
{
int accept_sock;
int client_sock;
struct sockaddr_in client;
socklen_t len;
- accept_sock = THREAD_FD(thread);
+ accept_sock = EVENT_FD(thread);
/* Reregister myself. */
zserv_event(NULL, ZSERV_ACCEPT);
{
switch (event) {
case ZSERV_ACCEPT:
- thread_add_read(zrouter.master, zserv_accept, NULL, zsock,
- NULL);
+ event_add_read(zrouter.master, zserv_accept, NULL, zsock, NULL);
break;
case ZSERV_PROCESS_MESSAGES:
- thread_add_event(zrouter.master, zserv_process_messages, client,
- 0, &client->t_process);
+ event_add_event(zrouter.master, zserv_process_messages, client,
+ 0, &client->t_process);
break;
case ZSERV_HANDLE_CLIENT_FAIL:
- thread_add_event(zrouter.master, zserv_handle_client_fail,
- client, 0, &client->t_cleanup);
+ event_add_event(zrouter.master, zserv_handle_client_fail,
+ client, 0, &client->t_cleanup);
}
}
if (info->t_stale_removal) {
vty_out(vty,
"Stale delete timer: %ld sec\n",
- thread_timer_remain_second(
+ event_timer_remain_second(
info->t_stale_removal));
}
}
#include "lib/vrf.h" /* for vrf_bitmap_t */
#include "lib/zclient.h" /* for redist_proto */
#include "lib/stream.h" /* for stream, stream_fifo */
-#include "lib/thread.h" /* for thread, thread_master */
+#include "frrevent.h" /* for event, event_loop */
#include "lib/linklist.h" /* for list */
#include "lib/workqueue.h" /* for work_queue */
#include "lib/hook.h" /* for DECLARE_HOOK, DECLARE_KOOH */
/* Book keeping */
struct prefix *current_prefix;
void *stale_client_ptr;
- struct thread *t_stale_removal;
+ struct event *t_stale_removal;
TAILQ_ENTRY(client_gr_info) gr_info;
};
struct buffer *wb;
/* Threads for read/write. */
- struct thread *t_read;
- struct thread *t_write;
+ struct event *t_read;
+ struct event *t_write;
/* Event for message processing, for the main pthread */
- struct thread *t_process;
+ struct event *t_process;
/* Event for the main pthread */
- struct thread *t_cleanup;
+ struct event *t_cleanup;
/* This client's redistribute flag. */
struct redist_proto mi_redist[AFI_MAX][ZEBRA_ROUTE_MAX];
struct zmsghdr *hdr);
/* TODO */
-__attribute__((__noreturn__)) void zebra_finalize(struct thread *event);
+__attribute__((__noreturn__)) void zebra_finalize(struct event *event);
/*
* Graceful restart functions.