}
/* Threads. */
- thread_add_read(master, babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+ event_add_read(master, babel_read_protocol, NULL, protocol_socket,
+ &babel_routing_process->t_read);
/* wait a little: zebra will announce interfaces, addresses, routes... */
- thread_add_timer_msec(master, babel_init_routing_process, NULL, 200L, &babel_routing_process->t_update);
+ event_add_timer_msec(master, babel_init_routing_process, NULL, 200L,
+ &babel_routing_process->t_update);
/* Distribute list install. */
babel_routing_process->distribute_ctx = distribute_list_ctx_create (vrf_lookup_by_id(VRF_DEFAULT));
}
/* re-add thread */
- thread_add_read(master, &babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+ event_add_read(master, &babel_read_protocol, NULL, protocol_socket,
+ &babel_routing_process->t_read);
}
/* Zebra will give some information, especially about interfaces. This function
{
long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
thread_cancel(&(babel_routing_process->t_update));
- thread_add_timer_msec(master, babel_main_loop, NULL, msecs, &babel_routing_process->t_update);
+ event_add_timer_msec(master, babel_main_loop, NULL, msecs,
+ &babel_routing_process->t_update);
}
void
bvrf->bg_echov6 = bp_echov6_socket(vrf);
if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
- &bvrf->bg_ev[0]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+ &bvrf->bg_ev[0]);
if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
- &bvrf->bg_ev[1]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+ &bvrf->bg_ev[1]);
if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
- &bvrf->bg_ev[2]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+ &bvrf->bg_ev[2]);
if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
- &bvrf->bg_ev[3]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+ &bvrf->bg_ev[3]);
if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
- &bvrf->bg_ev[4]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+ &bvrf->bg_ev[4]);
if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
- &bvrf->bg_ev[5]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+ &bvrf->bg_ev[5]);
if (vrf->vrf_id != VRF_DEFAULT) {
bfdd_zclient_register(vrf->vrf_id);
{
if (sd == bvrf->bg_shop) {
THREAD_OFF(bvrf->bg_ev[0]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
- &bvrf->bg_ev[0]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+ &bvrf->bg_ev[0]);
} else if (sd == bvrf->bg_mhop) {
THREAD_OFF(bvrf->bg_ev[1]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
- &bvrf->bg_ev[1]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+ &bvrf->bg_ev[1]);
} else if (sd == bvrf->bg_shop6) {
THREAD_OFF(bvrf->bg_ev[2]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
- &bvrf->bg_ev[2]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+ &bvrf->bg_ev[2]);
} else if (sd == bvrf->bg_mhop6) {
THREAD_OFF(bvrf->bg_ev[3]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
- &bvrf->bg_ev[3]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+ &bvrf->bg_ev[3]);
} else if (sd == bvrf->bg_echo) {
THREAD_OFF(bvrf->bg_ev[4]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
- &bvrf->bg_ev[4]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+ &bvrf->bg_ev[4]);
} else if (sd == bvrf->bg_echov6) {
THREAD_OFF(bvrf->bg_ev[5]);
- thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
- &bvrf->bg_ev[5]);
+ event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+ &bvrf->bg_ev[5]);
}
}
/* Initialize zebra connection. */
bfdd_zclient_init(&bglobal.bfdd_privs);
- thread_add_read(master, control_accept, NULL, bglobal.bg_csock,
- &bglobal.bg_csockev);
+ event_add_read(master, control_accept, NULL, bglobal.bg_csock,
+ &bglobal.bg_csockev);
/* Install commands. */
bfdd_vty_init();
control_new(csock);
- thread_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
+ event_add_read(master, control_accept, NULL, sd, &bglobal.bg_csockev);
}
bcs->bcs_notify = 0;
bcs->bcs_sd = sd;
- thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+ event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
TAILQ_INIT(&bcs->bcs_bcqueue);
TAILQ_INIT(&bcs->bcs_bnplist);
bcs->bcs_bout = &bcq->bcq_bcb;
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return 1;
bcs->bcs_bout = bcb;
/* New messages, active write events. */
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
}
return 0;
schedule_next_read:
bcs->bcs_ev = NULL;
- thread_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
+ event_add_read(master, control_read, bcs, sd, &bcs->bcs_ev);
}
static void control_write(struct event *t)
if (bwrite < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs,
- bcs->bcs_sd, &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return;
}
bcb->bcb_left -= bwrite;
if (bcb->bcb_left > 0) {
bcs->bcs_outev = NULL;
- thread_add_write(master, control_write, bcs, bcs->bcs_sd,
- &bcs->bcs_outev);
+ event_add_write(master, control_write, bcs, bcs->bcs_sd,
+ &bcs->bcs_outev);
return;
}
/* Schedule if it is not yet. */
if (bdc->outbufev == NULL)
- thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
- &bdc->outbufev);
+ event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+ &bdc->outbufev);
return 0;
}
return;
stream_pulldown(bdc->inbuf);
- thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
}
static void _bfd_session_register_dplane(struct hash_bucket *hb, void *arg)
if (sock == -1)
return bdc;
- thread_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, sock, &bdc->inbufev);
/* Register all unattached sessions. */
bfd_key_iterate(_bfd_session_register_dplane, bdc);
socket_close(&bdc->sock);
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
- &bdc->connectev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
+ &bdc->connectev);
return;
}
zlog_debug("%s: new data plane client connected", __func__);
reschedule_and_return:
- thread_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
- &bglobal.bg_dplane_sockev);
+ event_add_read(master, bfd_dplane_accept, bg, bg->bg_dplane_sock,
+ &bglobal.bg_dplane_sockev);
}
/*
stream_reset(bdc->outbuf);
/* Ask for read notifications. */
- thread_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
+ event_add_read(master, bfd_dplane_read, bdc, bdc->sock, &bdc->inbufev);
/* Remove all sessions then register again to send them all. */
bfd_key_iterate(_bfd_session_unregister_dplane, bdc);
/* If we are not connected yet, ask for write notifications. */
bdc->connecting = true;
- thread_add_write(master, bfd_dplane_write, bdc, bdc->sock,
- &bdc->outbufev);
+ event_add_write(master, bfd_dplane_write, bdc, bdc->sock,
+ &bdc->outbufev);
} else {
if (bglobal.debug_dplane)
zlog_debug("%s: server connection: %d", __func__, sock);
THREAD_OFF(bdc->inbufev);
THREAD_OFF(bdc->outbufev);
socket_close(&sock);
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 3,
- &bdc->connectev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 3,
+ &bdc->connectev);
}
static void bfd_dplane_client_init(const struct sockaddr *sa, socklen_t salen)
bdc->client = true;
- thread_add_timer(master, bfd_dplane_client_connect, bdc, 0,
- &bdc->connectev);
+ event_add_timer(master, bfd_dplane_client_connect, bdc, 0,
+ &bdc->connectev);
/* Insert into data plane lists. */
TAILQ_INSERT_TAIL(&bglobal.bg_dplaneq, bdc, entry);
}
bglobal.bg_dplane_sock = sock;
- thread_add_read(master, bfd_dplane_accept, &bglobal, sock,
- &bglobal.bg_dplane_sockev);
+ event_add_read(master, bfd_dplane_accept, &bglobal, sock,
+ &bglobal.bg_dplane_sockev);
}
int bfd_dplane_add_session(struct bfd_session *bs)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
- &bs->recvtimer_ev);
+ event_add_timer_tv(master, bfd_recvtimer_cb, bs, &tv,
+ &bs->recvtimer_ev);
}
void bfd_echo_recvtimer_update(struct bfd_session *bs)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
- &bs->echo_recvtimer_ev);
+ event_add_timer_tv(master, bfd_echo_recvtimer_cb, bs, &tv,
+ &bs->echo_recvtimer_ev);
}
void bfd_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
+ event_add_timer_tv(master, bfd_xmt_cb, bs, &tv, &bs->xmttimer_ev);
}
void bfd_echo_xmttimer_update(struct bfd_session *bs, uint64_t jitter)
tv_normalize(&tv);
- thread_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
- &bs->echo_xmttimer_ev);
+ event_add_timer_tv(master, bfd_echo_xmt_cb, bs, &tv,
+ &bs->echo_xmttimer_ev);
}
void bfd_recvtimer_delete(struct bfd_session *bs)
struct timeval tv;
if (bt->stat_msec)
- thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
- &bt->t_stats);
+ event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
gettimeofday(&tv, NULL);
return;
}
- thread_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
+ event_add_read(bm->master, bmp_read, bmp, bmp->socket, &bmp->t_read);
}
static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
bmp->state = BMP_PeerUp;
bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
bmp_wrerr);
- thread_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
+ event_add_read(bm->master, bmp_read, bmp, bmp_sock, &bmp->t_read);
bmp_send_initiation(bmp);
return bmp;
int bmp_sock;
/* We continue hearing BMP socket. */
- thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+ event_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
memset(&su, 0, sizeof(union sockunion));
goto out_sock;
bl->sock = sock;
- thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+ event_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
return;
out_sock:
close(sock);
ba->curretry = ba->maxretry;
if (ba->socket == -1)
- thread_add_timer_msec(bm->master, bmp_active_thread, ba,
- ba->curretry, &ba->t_timer);
+ event_add_timer_msec(bm->master, bmp_active_thread, ba,
+ ba->curretry, &ba->t_timer);
else {
- thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
- &ba->t_read);
- thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+ event_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_read);
+ event_add_write(bm->master, bmp_active_thread, ba, ba->socket,
&ba->t_write);
}
}
bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
if (bt->stat_msec)
- thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
- &bt->t_stats);
+ event_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
return CMD_SUCCESS;
}
bgp = THREAD_ARG(t);
assert(bgp);
- thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
- bgp->condition_check_period, &bgp->t_condition_check);
+ event_add_timer(bm->master, bgp_conditional_adv_timer, bgp,
+ bgp->condition_check_period, &bgp->t_condition_check);
/* loop through each peer and check if we have peers with
* advmap_table_change attribute set, to make sure we send
/* Register for conditional routes polling timer */
if (!thread_is_scheduled(bgp->t_condition_check))
- thread_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
- &bgp->t_condition_check);
+ event_add_timer(bm->master, bgp_conditional_adv_timer, bgp, 0,
+ &bgp->t_condition_check);
}
void bgp_conditional_adv_disable(struct peer *peer, afi_t afi, safi_t safi)
struct bgp_damp_config *bdc = THREAD_ARG(t);
bdc->t_reuse = NULL;
- thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
- &bdc->t_reuse);
+ event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+ &bdc->t_reuse);
t_now = monotime(NULL);
bgp_damp_parameter_set(half, reuse, suppress, max, bdc);
/* Register reuse timer. */
- thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
- &bdc->t_reuse);
+ event_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE,
+ &bdc->t_reuse);
return 0;
}
interval = interval
- secs_into_day % interval; /* always > 0 */
}
- thread_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
- interval, &bgp_dump->t_interval);
+ event_add_timer(bm->master, bgp_dump_interval_func, bgp_dump,
+ interval, &bgp_dump->t_interval);
} else {
/* One-off dump: execute immediately, don't affect any scheduled
* dumps */
- thread_add_event(bm->master, bgp_dump_interval_func, bgp_dump,
- 0, &bgp_dump->t_interval);
+ event_add_event(bm->master, bgp_dump_interval_func, bgp_dump, 0,
+ &bgp_dump->t_interval);
}
return 0;
if (BGP_DEBUG(evpn_mh, EVPN_MH_ES))
zlog_debug("periodic consistency checking started");
- thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
- BGP_EVPN_CONS_CHECK_INTERVAL,
- &bgp_mh_info->t_cons_check);
+ event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+ BGP_EVPN_CONS_CHECK_INTERVAL,
+ &bgp_mh_info->t_cons_check);
}
/* queue up the es for background consistency checks */
}
/* restart the timer */
- thread_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
+ event_add_timer(bm->master, bgp_evpn_run_consistency_checks, NULL,
BGP_EVPN_CONS_CHECK_INTERVAL,
&bgp_mh_info->t_cons_check);
}
bgp_reads_on(peer);
bgp_writes_on(peer);
- thread_add_event(bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet, peer, 0,
+ &peer->t_process_packet);
return (peer);
}
peer->synctime = monotime(NULL);
- thread_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
- &peer->t_generate_updgrp_packets);
+ event_add_timer_msec(bm->master, bgp_generate_updgrp_packets, peer, 0,
+ &peer->t_generate_updgrp_packets);
/* MRAI timer will be started again when FIFO is built, no need to
* do it here.
bgp_set_llgr_stale(peer, afi, safi);
bgp_clear_stale_route(peer, afi, safi);
- thread_add_timer(bm->master,
- bgp_llgr_stale_timer_expire, paf,
- peer->llgr[afi][safi].stale_time,
- &peer->t_llgr_stale[afi][safi]);
+ event_add_timer(bm->master, bgp_llgr_stale_timer_expire,
+ paf, peer->llgr[afi][safi].stale_time,
+ &peer->t_llgr_stale[afi][safi]);
for (ALL_LIST_ELEMENTS(peer->bgp->peer, node, nnode,
tmp_peer))
zlog_info("Begin maxmed onstartup mode - timer %d seconds",
bgp->v_maxmed_onstartup);
- thread_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
- bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
+ event_add_timer(bm->master, bgp_maxmed_onstartup_timer, bgp,
+ bgp->v_maxmed_onstartup, &bgp->t_maxmed_onstartup);
if (!bgp->v_maxmed_admin) {
bgp->maxmed_active = 1;
peer->update_delay_over = 0;
/* Start the update-delay timer */
- thread_add_timer(bm->master, bgp_update_delay_timer, bgp,
- bgp->v_update_delay, &bgp->t_update_delay);
+ event_add_timer(bm->master, bgp_update_delay_timer, bgp,
+ bgp->v_update_delay, &bgp->t_update_delay);
if (bgp->v_establish_wait != bgp->v_update_delay)
- thread_add_timer(bm->master, bgp_establish_wait_timer, bgp,
- bgp->v_establish_wait, &bgp->t_establish_wait);
+ event_add_timer(bm->master, bgp_establish_wait_timer, bgp,
+ bgp->v_establish_wait, &bgp->t_establish_wait);
frr_timestamp(3, bgp->update_delay_begin_time,
sizeof(bgp->update_delay_begin_time));
* bgp_connect_check() as the handler for each and cancel the
* unused event in that function.
*/
- thread_add_read(bm->master, bgp_connect_check, peer, peer->fd,
- &peer->t_connect_check_r);
- thread_add_write(bm->master, bgp_connect_check, peer, peer->fd,
- &peer->t_connect_check_w);
+ event_add_read(bm->master, bgp_connect_check, peer, peer->fd,
+ &peer->t_connect_check_r);
+ event_add_write(bm->master, bgp_connect_check, peer, peer->fd,
+ &peer->t_connect_check_w);
break;
}
return BGP_FSM_SUCCESS;
thread_info->safi = safi;
thread_info->bgp = bgp;
- thread_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
- thread_info, bgp->select_defer_time,
- &gr_info->t_select_deferral);
+ event_add_timer(bm->master, bgp_graceful_deferral_timer_expire,
+ thread_info, bgp->select_defer_time,
+ &gr_info->t_select_deferral);
}
gr_info->eor_required++;
/* Send message to RIB indicating route update pending */
#define BGP_TIMER_ON(T, F, V) \
do { \
if ((peer->status != Deleted)) \
- thread_add_timer(bm->master, (F), peer, (V), &(T)); \
+ event_add_timer(bm->master, (F), peer, (V), &(T)); \
} while (0)
#define BGP_EVENT_ADD(P, E) \
do { \
if ((P)->status != Deleted) \
- thread_add_event(bm->master, bgp_event, (P), (E), \
- NULL); \
+ event_add_event(bm->master, bgp_event, (P), (E), \
+ NULL); \
} while (0)
#define BGP_EVENT_FLUSH(P) \
thread_cancel_event_ready(bm->master, (P)); \
} while (0)
-#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
- do { \
- if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
- PEER_ROUTE_ADV_DELAY(peer)) \
- thread_add_timer_msec(bm->master, (F), peer, \
- (BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * 1000),\
- (T)); \
- else \
- thread_add_timer_msec(bm->master, (F), peer, \
- 0, (T)); \
- } while (0) \
+#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
+ do { \
+ if (BGP_SUPPRESS_FIB_ENABLED(peer->bgp) && \
+ PEER_ROUTE_ADV_DELAY(peer)) \
+ event_add_timer_msec( \
+ bm->master, (F), peer, \
+ (BGP_DEFAULT_UPDATE_ADVERTISEMENT_TIME * \
+ 1000), \
+ (T)); \
+ else \
+ event_add_timer_msec(bm->master, (F), peer, 0, (T)); \
+ } while (0)
#define BGP_MSEC_JITTER 10
assert(!peer->t_connect_check_w);
assert(peer->fd);
- thread_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
- &peer->t_write);
+ event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+ &peer->t_write);
SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
}
assert(!peer->t_connect_check_w);
assert(peer->fd);
- thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
- &peer->t_read);
+ event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+ &peer->t_read);
SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
}
* sent in the update message
*/
if (reschedule) {
- thread_add_write(fpt->master, bgp_process_writes, peer,
- peer->fd, &peer->t_write);
+ event_add_write(fpt->master, bgp_process_writes, peer, peer->fd,
+ &peer->t_write);
} else if (!fatal) {
BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
bgp_generate_updgrp_packets);
/* Handle the error in the main pthread, include the
* specific state change from 'bgp_read'.
*/
- thread_add_event(bm->master, bgp_packet_process_error,
- peer, code, &peer->t_process_packet_error);
+ event_add_event(bm->master, bgp_packet_process_error, peer,
+ code, &peer->t_process_packet_error);
goto done;
}
if (!ibuf_full)
assert(ringbuf_space(peer->ibuf_work) >= peer->max_packet_size);
- thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
- &peer->t_read);
+ event_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
+ &peer->t_read);
if (added_pkt)
- thread_add_event(bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet, peer, 0,
+ &peer->t_process_packet);
}
/*
return;
}
- thread_add_read(bm->master, bgp_accept, listener, accept_sock,
- &listener->thread);
+ event_add_read(bm->master, bgp_accept, listener, accept_sock,
+ &listener->thread);
/* Accept client connection. */
bgp_sock = sockunion_accept(accept_sock, &su);
listener->bgp = bgp;
memcpy(&listener->su, sa, salen);
- thread_add_read(bm->master, bgp_accept, listener, sock,
- &listener->thread);
+ event_add_read(bm->master, bgp_accept, listener, sock,
+ &listener->thread);
listnode_add(bm->listen_sockets, listener);
return 0;
return;
if (bnc->ifindex)
- thread_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
- bnc->ifindex, NULL);
+ event_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
+ bnc->ifindex, NULL);
}
void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
}
if (peer_established(peer))
- thread_add_timer(bm->master,
- bgp_refresh_stalepath_timer_expire,
- paf, peer->bgp->stalepath_time,
- &peer->t_refresh_stalepath);
+ event_add_timer(bm->master,
+ bgp_refresh_stalepath_timer_expire, paf,
+ peer->bgp->stalepath_time,
+ &peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
frr_with_mutex (&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
- thread_add_event(
- bm->master, bgp_process_packet, peer, 0,
- &peer->t_process_packet);
+ event_add_event(bm->master, bgp_process_packet,
+ peer, 0,
+ &peer->t_process_packet);
}
}
}
if (!bgp->t_rmap_def_originate_eval) {
bgp_lock(bgp);
- thread_add_timer(
+ event_add_timer(
bm->master,
update_group_refresh_default_originate_route_map,
bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,
/* If there are more routes to be processed, start the
* selection timer
*/
- thread_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
+ event_add_timer(bm->master, bgp_route_select_timer_expire, thread_info,
BGP_ROUTE_SELECT_DELAY,
&bgp->gr_info[afi][safi].t_route_select);
}
* multiple peers and the announcement doesn't happen in the
* vty context.
*/
- thread_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
- (subgrp->peer_count == 1)
- ? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
- : BGP_ANNOUNCE_ROUTE_DELAY_MS,
- &paf->t_announce_route);
+ event_add_timer_msec(bm->master, bgp_announce_route_timer_expired, paf,
+ (subgrp->peer_count == 1)
+ ? BGP_ANNOUNCE_ROUTE_SHORT_DELAY_MS
+ : BGP_ANNOUNCE_ROUTE_DELAY_MS,
+ &paf->t_announce_route);
}
/*
*/
if (dest || table->soft_reconfig_init) {
table->soft_reconfig_init = false;
- thread_add_event(bm->master, bgp_soft_reconfig_table_task,
- table, 0, &table->soft_reconfig_thread);
+ event_add_event(bm->master, bgp_soft_reconfig_table_task, table,
+ 0, &table->soft_reconfig_thread);
return;
}
/* we're done, clean up the background iteration context info and
bgp_soft_reconfig_table_flag(table, true);
if (!table->soft_reconfig_thread)
- thread_add_event(bm->master,
- bgp_soft_reconfig_table_task, table, 0,
- &table->soft_reconfig_thread);
+ event_add_event(bm->master,
+ bgp_soft_reconfig_table_task, table, 0,
+ &table->soft_reconfig_thread);
/* Cancel bgp_announce_route_timer_expired threads.
* bgp_announce_route_timer_expired threads have been scheduled
* to announce routes as soon as the soft_reconfigure process
/* rmap_update_timer of 0 means don't do route updates */
if (bm->rmap_update_timer) {
- thread_add_timer(bm->master, bgp_route_map_update_timer,
- NULL, bm->rmap_update_timer,
- &bm->t_rmap_update);
+ event_add_timer(bm->master, bgp_route_map_update_timer, NULL,
+ bm->rmap_update_timer, &bm->t_rmap_update);
/* Signal the groups that a route-map update event has
* started */
struct prefix prefix;
struct pfx_record rec;
- thread_add_read(bm->master, bgpd_sync_callback, NULL,
- rpki_sync_socket_bgpd, NULL);
+ event_add_read(bm->master, bgpd_sync_callback, NULL,
+ rpki_sync_socket_bgpd, NULL);
if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
while (read(rpki_sync_socket_bgpd, &rec,
rrp->prefix = prefix;
rrp->afi = afi;
rrp->safi = safi;
- thread_add_event(bm->master, rpki_revalidate_prefix,
- rrp, 0, &bgp->t_revalidate[afi][safi]);
+ event_add_event(bm->master, rpki_revalidate_prefix, rrp,
+ 0, &bgp->t_revalidate[afi][safi]);
}
}
}
rvp->afi = afi;
rvp->safi = safi;
- thread_add_event(
+ event_add_event(
bm->master, bgp_rpki_revalidate_peer,
rvp, 0,
&peer->t_revalidate_all[afi][safi]);
}
- thread_add_read(bm->master, bgpd_sync_callback, NULL,
- rpki_sync_socket_bgpd, NULL);
+ event_add_read(bm->master, bgpd_sync_callback, NULL,
+ rpki_sync_socket_bgpd, NULL);
return;
{
if (!rtr_mgr_conf_in_sync(rtr_config)) {
RPKI_DEBUG("rtr_mgr is not synced, retrying.");
- thread_add_timer(bm->master, sync_expired, NULL,
- BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
- &t_rpki_sync);
+ event_add_timer(bm->master, sync_expired, NULL,
+ BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
+ &t_rpki_sync);
return;
}
return ERROR;
}
- thread_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
+ event_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups);
return false;
subgrp->t_merge_check = NULL;
- thread_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
- subgrp, 0, &subgrp->t_merge_check);
+ event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
+ subgrp, 0, &subgrp->t_merge_check);
SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
*/
SUBGRP_FOREACH_PEER (subgrp, paf)
if (peer_established(paf->peer))
- thread_add_timer_msec(
+ event_add_timer_msec(
bm->master, bgp_generate_updgrp_packets,
paf->peer, 0,
&paf->peer->t_generate_updgrp_packets);
* We should wait for the coalesce timer. Arm the timer if not done.
*/
if (!subgrp->t_coalesce) {
- thread_add_timer_msec(bm->master, subgroup_coalesce_timer,
- subgrp, subgrp->v_coalesce,
- &subgrp->t_coalesce);
+ event_add_timer_msec(bm->master, subgroup_coalesce_timer,
+ subgrp, subgrp->v_coalesce,
+ &subgrp->t_coalesce);
}
}
{
#define BGP_PRE_CONFIG_MAX_WAIT_SECONDS 600
THREAD_OFF(t_bgp_cfg);
- thread_add_timer(bm->master, bgp_config_finish, NULL,
- BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
+ event_add_timer(bm->master, bgp_config_finish, NULL,
+ BGP_PRE_CONFIG_MAX_WAIT_SECONDS, &t_bgp_cfg);
}
/* When we receive a hook the configuration is read,
/* Start a new timer to make sure we don't send EoR
* before route-maps are processed.
*/
- thread_add_timer(bm->master, bgp_config_finish, NULL,
- bgp_post_config_delay, &t_bgp_cfg);
+ event_add_timer(bm->master, bgp_config_finish, NULL,
+ bgp_post_config_delay, &t_bgp_cfg);
}
static int config_write_interface_one(struct vty *vty, struct vrf *vrf)
}
}
}
- thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
- &bgp_tm_thread_connect);
+ event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+ &bgp_tm_thread_connect);
}
bool bgp_zebra_tm_chunk_obtained(void)
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp;
- thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
- &bgp_tm_thread_connect);
+ event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
+ &bgp_tm_thread_connect);
}
int bgp_zebra_get_table_range(uint32_t chunk_size,
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
- thread_add_timer(bm->master, bgp_startup_timer_expire, bgp,
- bgp->restart_time, &bgp->t_startup);
+ event_add_timer(bm->master, bgp_startup_timer_expire, bgp,
+ bgp->restart_time, &bgp->t_startup);
/* printable name we can use in debug messages */
if (inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
if (lifetime > UINT32_MAX / 1001) {
/* sub-optimal case, but will probably never happen */
bpi->extra->vnc.import.timer = NULL;
- thread_add_timer(bm->master, timer_service_func, wcb, lifetime,
- &bpi->extra->vnc.import.timer);
+ event_add_timer(bm->master, timer_service_func, wcb, lifetime,
+ &bpi->extra->vnc.import.timer);
} else {
static uint32_t jitter;
uint32_t lifetime_msec;
lifetime_msec = (lifetime * 1000) + jitter;
bpi->extra->vnc.import.timer = NULL;
- thread_add_timer_msec(bm->master, timer_service_func, wcb,
- lifetime_msec,
- &bpi->extra->vnc.import.timer);
+ event_add_timer_msec(bm->master, timer_service_func, wcb,
+ lifetime_msec,
+ &bpi->extra->vnc.import.timer);
}
/* re-sort route list (BGP_PATH_REMOVED routes are last) */
m->rfd->response_lifetime);
}
- thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
- m->rfd->response_lifetime, &m->timer);
+ event_add_timer(bm->master, rfapiMonitorTimerExpire, m,
+ m->rfd->response_lifetime, &m->timer);
}
/*
m->rfd->response_lifetime);
}
- thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
- m->rfd->response_lifetime, &m->timer);
+ event_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
+ m->rfd->response_lifetime, &m->timer);
}
static int mon_eth_cmp(const void *a, const void *b)
vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
ri->lifetime);
- thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
- &ri->timer);
+ event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
+ &ri->timer);
}
extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
if (!eti->timer && eti->lifetime <= INT32_MAX) {
eti->timer = NULL;
- thread_add_timer(bm->master, vncExportWithdrawTimer, eti,
- eti->lifetime, &eti->timer);
+ event_add_timer(bm->master, vncExportWithdrawTimer, eti,
+ eti->lifetime, &eti->timer);
vnc_zlog_debug_verbose(
"%s: set expiration timer for %u seconds", __func__,
eti->lifetime);
::
- thread_add_read(struct thread_master *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);
+ event_add_read(struct thread_master *master, int (*handler)(struct event *), void *arg, int fd, struct event **ref);
The ``struct event`` is then created and added to the appropriate internal
datastructure within the ``threadmaster``. Note that the ``READ`` and
- ``fetch`` is ``thread_fetch()``
- ``exec()`` is ``thread_call``
- ``cancel()`` is ``thread_cancel()``
-- ``schedule()`` is any of the various task-specific ``thread_add_*`` functions
+- ``schedule()`` is any of the various task-specific ``event_add_*`` functions
Adding tasks is done with various task-specific function-like macros. These
macros wrap underlying functions in :file:`event.c` to provide additional
information added at compile time, such as the line number the task was
scheduled from, that can be accessed at runtime for debugging, logging and
informational purposes. Each task type has its own specific scheduling function
-that follow the naming convention ``thread_add_<type>``; see :file:`event.h`
+that follow the naming convention ``event_add_<type>``; see :file:`event.h`
for details.
There are some gotchas to keep in mind:
thread_cancel(&(e->t_distribute));
/* schedule Graceful restart for whole process in 10sec */
- thread_add_timer(master, eigrp_distribute_timer_process, e,
- (10), &e->t_distribute);
+ event_add_timer(master, eigrp_distribute_timer_process, e, (10),
+ &e->t_distribute);
return;
}
/* Cancel GR scheduled */
thread_cancel(&(ei->t_distribute));
/* schedule Graceful restart for interface in 10sec */
- thread_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
- &ei->t_distribute);
+ event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
+ &ei->t_distribute);
}
/*
eigrp_hello_send(ei, EIGRP_HELLO_NORMAL, NULL);
/* Hello timer set. */
- thread_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
- &ei->t_hello);
+ event_add_timer(master, eigrp_hello_timer, ei, ei->params.v_hello,
+ &ei->t_hello);
}
/**
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
thread_execute(master, eigrp_write, ei->eigrp,
ei->eigrp->fd);
} else {
- thread_add_write(master, eigrp_write, ei->eigrp,
- ei->eigrp->fd,
- &ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, ei->eigrp,
+ ei->eigrp->fd,
+ &ei->eigrp->t_write);
}
}
}
/* Set multicast memberships appropriately for new state. */
eigrp_if_set_multicast(ei);
- thread_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
+ event_add_event(master, eigrp_hello_timer, ei, (1), &ei->t_hello);
/*Prepare metrics*/
metric.bandwidth = eigrp_bandwidth_to_scaled(ei->params.bandwidth);
/* FSM macros*/
#define EIGRP_FSM_EVENT_SCHEDULE(I, E) \
- thread_add_event(master, eigrp_fsm_event, (I), (E))
+ event_add_event(master, eigrp_fsm_event, (I), (E))
#endif /* _ZEBRA_EIGRP_MACROS_H_ */
case EIGRP_NEIGHBOR_PENDING: {
/*Reset Hold Down Timer for neighbor*/
THREAD_OFF(nbr->t_holddown);
- thread_add_timer(master, holddown_timer_expired, nbr,
- nbr->v_holddown, &nbr->t_holddown);
+ event_add_timer(master, holddown_timer_expired, nbr,
+ nbr->v_holddown, &nbr->t_holddown);
break;
}
case EIGRP_NEIGHBOR_UP: {
/*Reset Hold Down Timer for neighbor*/
THREAD_OFF(nbr->t_holddown);
- thread_add_timer(master, holddown_timer_expired, nbr,
- nbr->v_holddown, &nbr->t_holddown);
+ event_add_timer(master, holddown_timer_expired, nbr,
+ nbr->v_holddown, &nbr->t_holddown);
break;
}
}
/* If packets still remain in queue, call write thread. */
if (!list_isempty(eigrp->oi_write_q)) {
- thread_add_write(master, eigrp_write, eigrp, eigrp->fd,
- &eigrp->t_write);
+ event_add_write(master, eigrp_write, eigrp, eigrp->fd,
+ &eigrp->t_write);
}
}
eigrp = THREAD_ARG(thread);
/* prepare for next packet. */
- thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+ event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
eigrp_fifo_push(nbr->ei->obuf, duplicate);
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
- EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+ EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/*Increment sequence number counter*/
nbr->ei->eigrp->sequence_number++;
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
}
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_packet_retrans, nbr,
- EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_packet_retrans, nbr,
+ EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
}
/*Start retransmission timer*/
- thread_add_timer(master, eigrp_unack_multicast_packet_retrans,
- nbr, EIGRP_PACKET_RETRANS_TIME,
- &ep->t_retrans_timer);
+ event_add_timer(master, eigrp_unack_multicast_packet_retrans,
+ nbr, EIGRP_PACKET_RETRANS_TIME,
+ &ep->t_retrans_timer);
/* Hook thread to write packet. */
if (nbr->ei->on_write_q == 0) {
listnode_add(nbr->ei->eigrp->oi_write_q, nbr->ei);
nbr->ei->on_write_q = 1;
}
- thread_add_write(master, eigrp_write, nbr->ei->eigrp,
- nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
+ event_add_write(master, eigrp_write, nbr->ei->eigrp,
+ nbr->ei->eigrp->fd, &nbr->ei->eigrp->t_write);
}
}
/* if there is packet waiting in queue,
* schedule this thread again with small delay */
if (nbr->retrans_queue->count > 0) {
- thread_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
- 10, &nbr->t_nbr_send_gr);
+ event_add_timer_msec(master, eigrp_update_send_GR_thread, nbr,
+ 10, &nbr->t_nbr_send_gr);
return;
}
eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);
- thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
+ event_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
eigrp->oi_write_q = list_new();
eigrp->topology_table = route_table_init();
if (f->initial_sync_timeout)
return;
- thread_add_timer(master, fabricd_initial_sync_timeout, f,
- timeout, &f->initial_sync_timeout);
+ event_add_timer(master, fabricd_initial_sync_timeout, f, timeout,
+ &f->initial_sync_timeout);
f->initial_sync_start = monotime(NULL);
if (IS_DEBUG_ADJ_PACKETS)
zlog_info("OpenFabric: Got tier %hhu from algorithm. Arming timer.",
tier);
f->tier_pending = tier;
- thread_add_timer(master, fabricd_tier_set_timer, f,
- f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
- &f->tier_set_timer);
-
+ event_add_timer(master, fabricd_tier_set_timer, f,
+ f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+ &f->tier_set_timer);
}
static void fabricd_bump_tier_calculation_timer(struct fabricd *f)
* the calculation */
THREAD_OFF(f->tier_calculation_timer);
- thread_add_timer(master, fabricd_tier_calculation_cb, f,
- 2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
- &f->tier_calculation_timer);
+ event_add_timer(master, fabricd_tier_calculation_cb, f,
+ 2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
+ &f->tier_calculation_timer);
}
static void fabricd_set_tier(struct fabricd *f, uint8_t tier)
continue;
THREAD_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
- thread_add_timer_msec(master, send_l2_csnp, circuit,
- isis_jitter(f->csnp_delay, CSNP_JITTER),
- &circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
+ event_add_timer_msec(master, send_l2_csnp, circuit,
+ isis_jitter(f->csnp_delay, CSNP_JITTER),
+ &circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
}
}
adj->flaps++;
if (level == IS_LEVEL_1) {
- thread_add_timer(master, send_l1_csnp,
- circuit, 0,
- &circuit->t_send_csnp[0]);
+ event_add_timer(
+ master, send_l1_csnp, circuit,
+ 0, &circuit->t_send_csnp[0]);
} else {
- thread_add_timer(master, send_l2_csnp,
- circuit, 0,
- &circuit->t_send_csnp[1]);
+ event_add_timer(
+ master, send_l2_csnp, circuit,
+ 0, &circuit->t_send_csnp[1]);
}
} else if (old_state == ISIS_ADJ_UP) {
circuit->upadjcount[level - 1]--;
void isis_circuit_prepare(struct isis_circuit *circuit)
{
#if ISIS_METHOD != ISIS_METHOD_DLPI
- thread_add_read(master, isis_receive, circuit, circuit->fd,
- &circuit->t_read);
+ event_add_read(master, isis_receive, circuit, circuit->fd,
+ &circuit->t_read);
#else
- thread_add_timer_msec(master, isis_receive, circuit,
- listcount(circuit->area->circuit_list) * 100,
- &circuit->t_read);
+ event_add_timer_msec(master, isis_receive, circuit,
+ listcount(circuit->area->circuit_list) * 100,
+ &circuit->t_read);
#endif
}
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr,
+ &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
}
/* 8.4.1 b) FIXME: solicit ES - 8.4.6 */
/* initializing PSNP timers */
if (circuit->is_type & IS_LEVEL_1)
- thread_add_timer(
+ event_add_timer(
master, send_l1_psnp, circuit,
isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
&circuit->t_send_psnp[0]);
if (circuit->is_type & IS_LEVEL_2)
- thread_add_timer(
+ event_add_timer(
master, send_l2_psnp, circuit,
isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
&circuit->t_send_psnp[1]);
if (level == 1) {
memset(circuit->u.bc.l1_desig_is, 0, ISIS_SYS_ID_LEN + 1);
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[level - 1],
- PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[level - 1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
} else {
memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[level - 1],
- PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[level - 1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
THREAD_OFF(circuit->t_send_csnp[level - 1]);
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
- thread_add_event(master, isis_event_dis_status_change, circuit, 0,
- NULL);
+ event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);
return ISIS_OK;
}
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 1);
- thread_add_timer(master, send_l1_csnp, circuit,
- isis_jitter(circuit->csnp_interval[level - 1],
- CSNP_JITTER),
- &circuit->t_send_csnp[0]);
+ event_add_timer(master, send_l1_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[level - 1],
+ CSNP_JITTER),
+ &circuit->t_send_csnp[0]);
} else {
memcpy(old_dr, circuit->u.bc.l2_desig_is, ISIS_SYS_ID_LEN + 1);
assert(circuit->circuit_id); /* must be non-zero */
lsp_generate_pseudo(circuit, 2);
- thread_add_timer(master, send_l2_csnp, circuit,
- isis_jitter(circuit->csnp_interval[level - 1],
- CSNP_JITTER),
- &circuit->t_send_csnp[1]);
+ event_add_timer(master, send_l2_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[level - 1],
+ CSNP_JITTER),
+ &circuit->t_send_csnp[1]);
}
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
- thread_add_event(master, isis_event_dis_status_change, circuit, 0,
- NULL);
+ event_add_timer(master, isis_run_dr, &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_event(master, isis_event_dis_status_change, circuit, 0, NULL);
return ISIS_OK;
}
{
isis->dyn_cache = list_new();
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
- thread_add_timer(master, dyn_cache_cleanup, isis, 120,
- &isis->t_dync_clean);
+ event_add_timer(master, dyn_cache_cleanup, isis, 120,
+ &isis->t_dync_clean);
}
void dyn_cache_finish(struct isis *isis)
XFREE(MTYPE_ISIS_DYNHN, dyn);
}
- thread_add_timer(master, dyn_cache_cleanup, isis, 120,
+ event_add_timer(master, dyn_cache_cleanup, isis, 120,
&isis->t_dync_clean);
}
if (!circuit->is_passive) {
if (level == 1) {
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[0],
- PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[0],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
} else {
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[1],
- PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[1],
+ PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
}
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
- thread_add_timer(master, isis_run_dr,
- &circuit->level_arg[level - 1],
- 2 * circuit->hello_interval[level - 1],
- &circuit->u.bc.t_run_dr[level - 1]);
+ event_add_timer(master, isis_run_dr,
+ &circuit->level_arg[level - 1],
+ 2 * circuit->hello_interval[level - 1],
+ &circuit->u.bc.t_run_dr[level - 1]);
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();
ils_debug("%s: start holddown timer for %s time %d", __func__,
circuit->interface->name, ldp_sync_info->holddown);
- thread_add_timer(master, isis_ldp_sync_holddown_timer,
- circuit, ldp_sync_info->holddown,
- &ldp_sync_info->t_holddown);
+ event_add_timer(master, isis_ldp_sync_holddown_timer, circuit,
+ ldp_sync_info->holddown, &ldp_sync_info->t_holddown);
}
/*
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1;
THREAD_OFF(area->t_rlfa_rib_update);
- thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
- &area->t_rlfa_rib_update);
+ event_add_timer(master, isis_area_verify_routes_cb, area, 2,
+ &area->t_rlfa_rib_update);
return 0;
}
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1;
THREAD_OFF(area->t_rlfa_rib_update);
- thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
- &area->t_rlfa_rib_update);
+ event_add_timer(master, isis_area_verify_routes_cb, area, 2,
+ &area->t_rlfa_rib_update);
}
void isis_rlfa_list_init(struct isis_spftree *spftree)
overload_time = isis_restart_read_overload_time(area);
if (overload_time > 0) {
isis_area_overload_bit_set(area, true);
- thread_add_timer(master, set_overload_on_start_timer,
- area, overload_time,
- &area->t_overload_on_startup_timer);
+ event_add_timer(master, set_overload_on_start_timer,
+ area, overload_time,
+ &area->t_overload_on_startup_timer);
}
device_startup = false;
}
THREAD_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
- thread_add_timer(master, lsp_refresh,
- &area->lsp_refresh_arg[level - 1], refresh_time,
- &area->t_lsp_refresh[level - 1]);
+ event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
+ refresh_time, &area->t_lsp_refresh[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug("ISIS-Upd (%s): Building L%d LSP %s, len %hu, seq 0x%08x, cksum 0x%04hx, lifetime %hus refresh %hus",
lsp_seqno_update(lsp);
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
- thread_add_timer(master, lsp_refresh,
- &area->lsp_refresh_arg[level - 1], refresh_time,
- &area->t_lsp_refresh[level - 1]);
+ event_add_timer(master, lsp_refresh, &area->lsp_refresh_arg[level - 1],
+ refresh_time, &area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
if (IS_DEBUG_UPDATE_PACKETS) {
}
area->lsp_regenerate_pending[lvl - 1] = 1;
- thread_add_timer_msec(master, lsp_refresh,
- &area->lsp_refresh_arg[lvl - 1],
- timeout,
- &area->t_lsp_refresh[lvl - 1]);
+ event_add_timer_msec(master, lsp_refresh,
+ &area->lsp_refresh_arg[lvl - 1], timeout,
+ &area->t_lsp_refresh[lvl - 1]);
}
if (all_pseudo) {
THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
if (level == IS_LEVEL_1)
- thread_add_timer(
- master, lsp_l1_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
- thread_add_timer(
- master, lsp_l2_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
if (level == IS_LEVEL_1)
- thread_add_timer(
- master, lsp_l1_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l1_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
else if (level == IS_LEVEL_2)
- thread_add_timer(
- master, lsp_l2_refresh_pseudo, circuit, refresh_time,
- &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ event_add_timer(master, lsp_l2_refresh_pseudo, circuit,
+ refresh_time,
+ &circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
if (IS_DEBUG_UPDATE_PACKETS) {
zlog_debug(
circuit->lsp_regenerate_pending[lvl - 1] = 1;
if (lvl == IS_LEVEL_1) {
- thread_add_timer_msec(
+ event_add_timer_msec(
master, lsp_l1_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
} else if (lvl == IS_LEVEL_2) {
- thread_add_timer_msec(
+ event_add_timer_msec(
master, lsp_l2_refresh_pseudo, circuit, timeout,
&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
}
area = THREAD_ARG(thread);
assert(area);
area->t_tick = NULL;
- thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
+ event_add_timer(master, lsp_tick, area, 1, &area->t_tick);
struct isis_circuit *fabricd_init_c = fabricd_initial_sync_circuit(area);
/* Max wait time for config to load before generating lsp */
#define ISIS_PRE_CONFIG_MAX_WAIT_SECONDS 600
THREAD_OFF(t_isis_cfg);
- thread_add_timer(im->master, isis_config_finish, NULL,
- ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
+ event_add_timer(im->master, isis_config_finish, NULL,
+ ISIS_PRE_CONFIG_MAX_WAIT_SECONDS, &t_isis_cfg);
}
static void isis_config_end(void)
/* lets take care of the expiry */
THREAD_OFF(adj->t_expire);
- thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
- &adj->t_expire);
+ event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
+ &adj->t_expire);
/* While fabricds initial sync is in progress, ignore hellos from other
* interfaces than the one we are performing the initial sync on. */
: iih->circuit->u.bc.l2_desig_is;
if (memcmp(dis, iih->dis, ISIS_SYS_ID_LEN + 1)) {
- thread_add_event(master, isis_event_dis_status_change,
- iih->circuit, 0, NULL);
+ event_add_event(master, isis_event_dis_status_change,
+ iih->circuit, 0, NULL);
memcpy(dis, iih->dis, ISIS_SYS_ID_LEN + 1);
}
}
/* lets take care of the expiry */
THREAD_OFF(adj->t_expire);
- thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
- &adj->t_expire);
+ event_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
+ &adj->t_expire);
/*
* If the snpa for this circuit is found from LAN Neighbours TLV
THREAD_OFF(*threadp);
}
- thread_add_timer_msec(master, send_hello_cb,
- &circuit->level_arg[level - 1],
- isis_jitter(delay, IIH_JITTER),
- threadp);
+ event_add_timer_msec(master, send_hello_cb,
+ &circuit->level_arg[level - 1],
+ isis_jitter(delay, IIH_JITTER), threadp);
}
void send_hello_sched(struct isis_circuit *circuit, int level, long delay)
send_csnp(circuit, 1);
}
/* set next timer thread */
- thread_add_timer(master, send_l1_csnp, circuit,
- isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
- &circuit->t_send_csnp[0]);
+ event_add_timer(master, send_l1_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[0], CSNP_JITTER),
+ &circuit->t_send_csnp[0]);
}
void send_l2_csnp(struct event *thread)
send_csnp(circuit, 2);
}
/* set next timer thread */
- thread_add_timer(master, send_l2_csnp, circuit,
- isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
- &circuit->t_send_csnp[1]);
+ event_add_timer(master, send_l2_csnp, circuit,
+ isis_jitter(circuit->csnp_interval[1], CSNP_JITTER),
+ &circuit->t_send_csnp[1]);
}
/*
send_psnp(1, circuit);
/* set next timer thread */
- thread_add_timer(master, send_l1_psnp, circuit,
- isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
- &circuit->t_send_psnp[0]);
+ event_add_timer(master, send_l1_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[0], PSNP_JITTER),
+ &circuit->t_send_psnp[0]);
}
/*
send_psnp(2, circuit);
/* set next timer thread */
- thread_add_timer(master, send_l2_psnp, circuit,
- isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
- &circuit->t_send_psnp[1]);
+ event_add_timer(master, send_l2_psnp, circuit,
+ isis_jitter(circuit->psnp_interval[1], PSNP_JITTER),
+ &circuit->t_send_psnp[1]);
}
/*
if (area->spf_timer[level - 1])
return ISIS_OK;
- thread_add_timer_msec(master, isis_run_spf_cb,
- isis_run_spf_arg(area, level), delay,
- &area->spf_timer[level - 1]);
+ event_add_timer_msec(master, isis_run_spf_cb,
+ isis_run_spf_arg(area, level), delay,
+ &area->spf_timer[level - 1]);
return ISIS_OK;
}
timer = area->min_spf_interval[level - 1] - diff;
}
- thread_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
- timer, &area->spf_timer[level - 1]);
+ event_add_timer(master, isis_run_spf_cb, isis_run_spf_arg(area, level),
+ timer, &area->spf_timer[level - 1]);
if (IS_DEBUG_SPF_EVENTS)
zlog_debug("ISIS-SPF (%s) L%d SPF scheduled %ld sec from now",
if (!isis_zebra_label_manager_ready())
if (isis_zebra_label_manager_connect() < 0) {
/* Re-attempt to connect to Label Manager in 1 sec. */
- thread_add_timer(master, sr_start_label_manager, area,
- 1, &srdb->t_start_lm);
+ event_add_timer(master, sr_start_label_manager, area, 1,
+ &srdb->t_start_lm);
return -1;
}
struct isis_tx_queue_entry *e = THREAD_ARG(thread);
struct isis_tx_queue *queue = e->queue;
- thread_add_timer(master, tx_queue_send_event, e, 5, &e->retry);
+ event_add_timer(master, tx_queue_send_event, e, 5, &e->retry);
if (e->is_retry)
queue->circuit->area->lsp_rxmt_count++;
e->type = type;
THREAD_OFF(e->retry);
- thread_add_event(master, tx_queue_send_event, e, 0, &e->retry);
+ event_add_event(master, tx_queue_send_event, e, 0, &e->retry);
e->is_retry = false;
}
area->area_addrs->del = delete_area_addr;
if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST))
- thread_add_timer(master, lsp_tick, area, 1, &area->t_tick);
+ event_add_timer(master, lsp_tick, area, 1, &area->t_tick);
flags_initialize(&area->flags);
isis_sr_area_init(area);
av->arg = arg;
LIST_INSERT_HEAD(&accept_queue.queue, av, entry);
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
log_debug("%s: accepting on fd %d", __func__, fd);
{
log_debug(__func__);
accept_unarm();
- thread_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
+ event_add_timer(master, accept_timeout, NULL, 1, &accept_queue.evt);
}
void
{
struct accept_ev *av;
LIST_FOREACH(av, &accept_queue.queue, entry) {
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
}
}
static void accept_cb(struct event *thread)
{
struct accept_ev *av = THREAD_ARG(thread);
- thread_add_read(master, accept_cb, av, av->fd, &av->ev);
+ event_add_read(master, accept_cb, av, av->fd, &av->ev);
av->accept_cb(thread);
}
{
THREAD_OFF(adj->inactivity_timer);
adj->inactivity_timer = NULL;
- thread_add_timer(master, adj_itimer, adj, adj->holdtime,
- &adj->inactivity_timer);
+ event_add_timer(master, adj_itimer, adj, adj->holdtime,
+ &adj->inactivity_timer);
}
void
{
THREAD_OFF(tnbr->hello_timer);
tnbr->hello_timer = NULL;
- thread_add_timer(master, tnbr_hello_timer, tnbr, tnbr_get_hello_interval(tnbr),
- &tnbr->hello_timer);
+ event_add_timer(master, tnbr_hello_timer, tnbr,
+ tnbr_get_hello_interval(tnbr), &tnbr->hello_timer);
}
static void
imsg_init(&c->iev.ibuf, connfd);
c->iev.handler_read = control_dispatch_imsg;
c->iev.ev_read = NULL;
- thread_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
- &c->iev.ev_read);
+ event_add_read(master, c->iev.handler_read, &c->iev, c->iev.ibuf.fd,
+ &c->iev.ev_read);
c->iev.handler_write = ldp_write_handler;
c->iev.ev_write = NULL;
if_start_hello_timer(struct iface_af *ia)
{
THREAD_OFF(ia->hello_timer);
- thread_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
- &ia->hello_timer);
+ event_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
+ &ia->hello_timer);
}
static void
return;
THREAD_OFF(iface->ldp_sync.wait_for_sync_timer);
- thread_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
+ event_add_timer(master, iface_wait_for_ldp_sync_timer, iface,
if_get_wait_for_sync_interval(),
&iface->ldp_sync.wait_for_sync_timer);
}
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = lde_dispatch_parent;
- thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
- &iev_main->ev_read);
+ event_add_read(master, iev_main->handler_read, iev_main,
+ iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;
memset(&iev_main_sync_data, 0, sizeof(iev_main_sync_data));
fatal(NULL);
imsg_init(&iev_ldpe->ibuf, fd);
iev_ldpe->handler_read = lde_dispatch_imsg;
- thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
- &iev_ldpe->ev_read);
+ event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
+ iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;
iev_ldpe->ev_write = NULL;
break;
zclient_sync = NULL;
/* Retry using a timer */
- thread_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
+ event_add_timer(master, zclient_sync_retry, NULL, 1, NULL);
}
static void
lde_gc_start_timer(void)
{
THREAD_OFF(gc_timer);
- thread_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL,
- &gc_timer);
+ event_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL, &gc_timer);
}
void
frr_config_fork();
/* apply configuration */
- thread_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);
+ event_add_event(master, ldp_config_fork_apply, NULL, 0, NULL);
/* setup pipes to children */
if ((iev_ldpe = calloc(1, sizeof(struct imsgev))) == NULL ||
imsg_init(&iev_ldpe->ibuf, pipe_parent2ldpe[0]);
iev_ldpe->handler_read = main_dispatch_ldpe;
- thread_add_read(master, iev_ldpe->handler_read, iev_ldpe, iev_ldpe->ibuf.fd,
- &iev_ldpe->ev_read);
+ event_add_read(master, iev_ldpe->handler_read, iev_ldpe,
+ iev_ldpe->ibuf.fd, &iev_ldpe->ev_read);
iev_ldpe->handler_write = ldp_write_handler;
imsg_init(&iev_ldpe_sync->ibuf, pipe_parent2ldpe_sync[0]);
iev_ldpe_sync->handler_read = main_dispatch_ldpe;
- thread_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync, iev_ldpe_sync->ibuf.fd,
- &iev_ldpe_sync->ev_read);
+ event_add_read(master, iev_ldpe_sync->handler_read, iev_ldpe_sync,
+ iev_ldpe_sync->ibuf.fd, &iev_ldpe_sync->ev_read);
iev_ldpe_sync->handler_write = ldp_write_handler;
imsg_init(&iev_lde->ibuf, pipe_parent2lde[0]);
iev_lde->handler_read = main_dispatch_lde;
- thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
- &iev_lde->ev_read);
+ event_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
+ &iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;
imsg_init(&iev_lde_sync->ibuf, pipe_parent2lde_sync[0]);
iev_lde_sync->handler_read = main_dispatch_lde;
- thread_add_read(master, iev_lde_sync->handler_read, iev_lde_sync, iev_lde_sync->ibuf.fd,
- &iev_lde_sync->ev_read);
+ event_add_read(master, iev_lde_sync->handler_read, iev_lde_sync,
+ iev_lde_sync->ibuf.fd, &iev_lde_sync->ev_read);
iev_lde_sync->handler_write = ldp_write_handler;
if (main_imsg_send_ipc_sockets(&iev_ldpe->ibuf, &iev_lde->ibuf))
imsg_event_add(struct imsgev *iev)
{
if (iev->handler_read)
- thread_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
- &iev->ev_read);
+ event_add_read(master, iev->handler_read, iev, iev->ibuf.fd,
+ &iev->ev_read);
if (iev->handler_write && iev->ibuf.w.queued)
- thread_add_write(master, iev->handler_write, iev,
- iev->ibuf.fd, &iev->ev_write);
+ event_add_write(master, iev->handler_write, iev, iev->ibuf.fd,
+ &iev->ev_write);
}
int
evbuf_event_add(struct evbuf *eb)
{
if (eb->wbuf.queued)
- thread_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
- &eb->ev);
+ event_add_write(master, eb->handler, eb->arg, eb->wbuf.fd,
+ &eb->ev);
}
void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct event *),
fatal(NULL);
imsg_init(&iev_main->ibuf, LDPD_FD_ASYNC);
iev_main->handler_read = ldpe_dispatch_main;
- thread_add_read(master, iev_main->handler_read, iev_main, iev_main->ibuf.fd,
- &iev_main->ev_read);
+ event_add_read(master, iev_main->handler_read, iev_main,
+ iev_main->ibuf.fd, &iev_main->ev_read);
iev_main->handler_write = ldp_write_handler;
memset(&iev_main_data, 0, sizeof(iev_main_data));
/* This socket must be open before dropping privileges. */
global.pfkeysock = pfkey_init();
if (sysdep.no_pfkey == 0) {
- thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
- &pfkey_ev);
+ event_add_read(master, ldpe_dispatch_pfkey, NULL,
+ global.pfkeysock, &pfkey_ev);
}
#endif
fatal(NULL);
imsg_init(&iev_lde->ibuf, fd);
iev_lde->handler_read = ldpe_dispatch_lde;
- thread_add_read(master, iev_lde->handler_read, iev_lde, iev_lde->ibuf.fd,
- &iev_lde->ev_read);
+ event_add_read(master, iev_lde->handler_read, iev_lde,
+ iev_lde->ibuf.fd, &iev_lde->ev_read);
iev_lde->handler_write = ldp_write_handler;
iev_lde->ev_write = NULL;
break;
{
int fd = THREAD_FD(thread);
- thread_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
- &pfkey_ev);
+ event_add_read(master, ldpe_dispatch_pfkey, NULL, global.pfkeysock,
+ &pfkey_ev);
if (pfkey_read(fd, NULL) == -1)
fatal("pfkey_read failed, exiting...");
/* discovery socket */
af_global->ldp_disc_socket = disc_socket;
- thread_add_read(master, disc_recv_packet, &af_global->disc_ev, af_global->ldp_disc_socket,
- &af_global->disc_ev);
+ event_add_read(master, disc_recv_packet, &af_global->disc_ev,
+ af_global->ldp_disc_socket, &af_global->disc_ev);
/* extended discovery socket */
af_global->ldp_edisc_socket = edisc_socket;
- thread_add_read(master, disc_recv_packet, &af_global->edisc_ev, af_global->ldp_edisc_socket,
- &af_global->edisc_ev);
+ event_add_read(master, disc_recv_packet, &af_global->edisc_ev,
+ af_global->ldp_edisc_socket, &af_global->edisc_ev);
/* session socket */
af_global->ldp_session_socket = session_socket;
secs = nbr->keepalive / KEEPALIVE_PER_PERIOD;
THREAD_OFF(nbr->keepalive_timer);
nbr->keepalive_timer = NULL;
- thread_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
+ event_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
}
void
{
THREAD_OFF(nbr->keepalive_timeout);
nbr->keepalive_timeout = NULL;
- thread_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
- &nbr->keepalive_timeout);
+ event_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
+ &nbr->keepalive_timeout);
}
void
secs = INIT_FSM_TIMEOUT;
THREAD_OFF(nbr->init_timeout);
nbr->init_timeout = NULL;
- thread_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
+ event_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
}
void
THREAD_OFF(nbr->initdelay_timer);
nbr->initdelay_timer = NULL;
- thread_add_timer(master, nbr_idtimer, nbr, secs,
- &nbr->initdelay_timer);
+ event_add_timer(master, nbr_idtimer, nbr, secs, &nbr->initdelay_timer);
}
void
if (connect(nbr->fd, &remote_su.sa, sockaddr_len(&remote_su.sa))
== -1) {
if (errno == EINPROGRESS) {
- thread_add_write(master, nbr_connect_cb, nbr, nbr->fd,
- &nbr->ev_connect);
+ event_add_write(master, nbr_connect_cb, nbr, nbr->fd,
+ &nbr->ev_connect);
return (0);
}
log_warn("%s: error while connecting to %s", __func__,
struct in_addr lsr_id;
/* reschedule read */
- thread_add_read(master, disc_recv_packet, threadp, fd, threadp);
+ event_add_read(master, disc_recv_packet, threadp, fd, threadp);
/* setup buffer */
memset(&m, 0, sizeof(m));
uint16_t pdu_len, msg_len, msg_size, max_pdu_len;
int ret;
- thread_add_read(master, session_read, nbr, fd, &tcp->rev);
+ event_add_read(master, session_read, nbr, fd, &tcp->rev);
if ((n = read(fd, tcp->rbuf->buf + tcp->rbuf->wpos,
sizeof(tcp->rbuf->buf) - tcp->rbuf->wpos)) == -1) {
if ((tcp->rbuf = calloc(1, sizeof(struct ibuf_read))) == NULL)
fatal(__func__);
- thread_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
+ event_add_read(master, session_read, nbr, tcp->fd, &tcp->rev);
tcp->nbr = nbr;
}
pconn->addr = *addr;
TAILQ_INSERT_TAIL(&global.pending_conns, pconn, entry);
pconn->ev_timeout = NULL;
- thread_add_timer(master, pending_conn_timeout, pconn, PENDING_CONN_TIMEOUT,
- &pconn->ev_timeout);
+ event_add_timer(master, pending_conn_timeout, pconn,
+ PENDING_CONN_TIMEOUT, &pconn->ev_timeout);
return (pconn);
}
snmp_select_info(&maxfd, &fds, &timeout, &block);
if (!block) {
- thread_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
- &timeout_thr);
+ event_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
+ &timeout_thr);
}
ln = listhead(events);
thr = XCALLOC(MTYPE_TMP, sizeof(struct event *));
newln = listnode_add_before(events, ln, thr);
- thread_add_read(agentx_tm, agentx_read, newln, fd, thr);
+ event_add_read(agentx_tm, agentx_read, newln, fd, thr);
}
}
* Next event.
*
* This variable controls what action to execute when the command batch
- * finishes. Normally we'd use `thread_add_event` value, however since
+ * finishes. Normally we'd use `event_add_event` value, however since
* that function is going to be called multiple times and the value
* might be different we'll use this variable to keep track of it.
*/
void bfd_sess_install(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_INSTALL;
- thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
+ event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}
void bfd_sess_uninstall(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_UNINSTALL;
- thread_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
+ event_add_event(bsglobal.tm, _bfd_sess_send, bsp, 0, &bsp->installev);
}
enum bfd_session_state bfd_sess_status(const struct bfd_session_params *bsp)
}
/* Add new read thread. */
-void _thread_add_read_write(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct event *), void *arg, int fd,
- struct event **t_ptr)
+void _event_add_read_write(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ void (*func)(struct event *), void *arg, int fd,
+ struct event **t_ptr)
{
int dir = xref->thread_type;
struct event *thread = NULL;
}
}
-static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct event *), void *arg,
- struct timeval *time_relative,
- struct event **t_ptr)
+static void _event_add_timer_timeval(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ void (*func)(struct event *), void *arg,
+ struct timeval *time_relative,
+ struct event **t_ptr)
{
struct event *thread;
struct timeval t;
/* Add timer event thread. */
-void _thread_add_timer(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, long timer, struct event **t_ptr)
+void _event_add_timer(const struct xref_threadsched *xref,
+ struct thread_master *m, void (*func)(struct event *),
+ void *arg, long timer, struct event **t_ptr)
{
struct timeval trel;
trel.tv_sec = timer;
trel.tv_usec = 0;
- _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
+ _event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}
/* Add timer event thread with "millisecond" resolution */
-void _thread_add_timer_msec(const struct xref_threadsched *xref,
- struct thread_master *m,
- void (*func)(struct event *), void *arg, long timer,
- struct event **t_ptr)
+void _event_add_timer_msec(const struct xref_threadsched *xref,
+ struct thread_master *m,
+ void (*func)(struct event *), void *arg, long timer,
+ struct event **t_ptr)
{
struct timeval trel;
trel.tv_sec = timer / 1000;
trel.tv_usec = 1000 * (timer % 1000);
- _thread_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
+ _event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}
/* Add timer event thread with "timeval" resolution */
-void _thread_add_timer_tv(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, struct timeval *tv, struct event **t_ptr)
+void _event_add_timer_tv(const struct xref_threadsched *xref,
+ struct thread_master *m, void (*func)(struct event *),
+ void *arg, struct timeval *tv, struct event **t_ptr)
{
- _thread_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
+ _event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}
/* Add simple event thread. */
-void _thread_add_event(const struct xref_threadsched *xref,
- struct thread_master *m, void (*func)(struct event *),
- void *arg, int val, struct event **t_ptr)
+void _event_add_event(const struct xref_threadsched *xref,
+ struct thread_master *m, void (*func)(struct event *),
+ void *arg, int val, struct event **t_ptr)
{
struct event *thread = NULL;
#define _xref_t_a(addfn, type, m, f, a, v, t) \
({ \
- static const struct xref_threadsched _xref \
- __attribute__((used)) = { \
+ static const struct xref_threadsched _xref __attribute__( \
+ (used)) = { \
.xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \
.funcname = #f, \
.dest = #t, \
- .thread_type = THREAD_ ## type, \
+ .thread_type = THREAD_##type, \
}; \
XREF_LINK(_xref.xref); \
- _thread_add_ ## addfn(&_xref, m, f, a, v, t); \
- }) \
- /* end */
+ _event_add_##addfn(&_xref, m, f, a, v, t); \
+ }) /* end */
-#define thread_add_read(m,f,a,v,t) _xref_t_a(read_write, READ, m,f,a,v,t)
-#define thread_add_write(m,f,a,v,t) _xref_t_a(read_write, WRITE, m,f,a,v,t)
-#define thread_add_timer(m,f,a,v,t) _xref_t_a(timer, TIMER, m,f,a,v,t)
-#define thread_add_timer_msec(m,f,a,v,t) _xref_t_a(timer_msec, TIMER, m,f,a,v,t)
-#define thread_add_timer_tv(m,f,a,v,t) _xref_t_a(timer_tv, TIMER, m,f,a,v,t)
-#define thread_add_event(m,f,a,v,t) _xref_t_a(event, EVENT, m,f,a,v,t)
+#define event_add_read(m, f, a, v, t) _xref_t_a(read_write, READ, m, f, a, v, t)
+#define event_add_write(m, f, a, v, t) \
+ _xref_t_a(read_write, WRITE, m, f, a, v, t)
+#define event_add_timer(m, f, a, v, t) _xref_t_a(timer, TIMER, m, f, a, v, t)
+#define event_add_timer_msec(m, f, a, v, t) \
+ _xref_t_a(timer_msec, TIMER, m, f, a, v, t)
+#define event_add_timer_tv(m, f, a, v, t) \
+ _xref_t_a(timer_tv, TIMER, m, f, a, v, t)
+#define event_add_event(m, f, a, v, t) _xref_t_a(event, EVENT, m, f, a, v, t)
#define thread_execute(m,f,a,v) \
({ \
extern void thread_master_free(struct thread_master *);
extern void thread_master_free_unused(struct thread_master *);
-extern void _thread_add_read_write(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct event *), void *arg,
- int fd, struct event **tref);
-
-extern void _thread_add_timer(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct event *), void *arg, long t,
- struct event **tref);
-
-extern void _thread_add_timer_msec(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct event *), void *arg,
- long t, struct event **tref);
-
-extern void _thread_add_timer_tv(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct event *), void *arg,
- struct timeval *tv, struct event **tref);
-
-extern void _thread_add_event(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*fn)(struct event *), void *arg, int val,
- struct event **tref);
+extern void _event_add_read_write(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*fn)(struct event *), void *arg, int fd,
+ struct event **tref);
+
+extern void _event_add_timer(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*fn)(struct event *), void *arg, long t,
+ struct event **tref);
+
+extern void _event_add_timer_msec(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*fn)(struct event *), void *arg, long t,
+ struct event **tref);
+
+extern void _event_add_timer_tv(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*fn)(struct event *), void *arg,
+ struct timeval *tv, struct event **tref);
+
+extern void _event_add_event(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*fn)(struct event *), void *arg, int val,
+ struct event **tref);
extern void _thread_execute(const struct xref_threadsched *xref,
struct thread_master *master,
/* stop function, called from other threads to halt this one */
static int fpt_halt(struct frr_pthread *fpt, void **res)
{
- thread_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
+ event_add_event(fpt->master, &fpt_finish, fpt, 0, NULL);
pthread_join(fpt->thread, res);
return 0;
int sleeper[2];
pipe(sleeper);
- thread_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);
+ event_add_read(fpt->master, &fpt_dummy, NULL, sleeper[0], NULL);
fpt->master->handle_signals = false;
if (read)
frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT);
- thread_add_read(t->master, frrzmq_read_msg, cbp,
- cb->fd, &cb->read.thread);
+ event_add_read(t->master, frrzmq_read_msg, cbp, cb->fd,
+ &cb->read.thread);
return;
out_err:
cb->read.cb_error(cb->read.arg, cb->zmqsock);
}
-int _frrzmq_thread_add_read(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*partfunc)(void *arg, void *zmqsock,
- zmq_msg_t *msg, unsigned partnum),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock,
- struct frrzmq_cb **cbp)
+int _frrzmq_event_add_read(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*partfunc)(void *arg, void *zmqsock,
+ zmq_msg_t *msg, unsigned partnum),
+ void (*errfunc)(void *arg, void *zmqsock), void *arg,
+ void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
if (events & ZMQ_POLLIN) {
thread_cancel(&cb->read.thread);
- thread_add_event(master, frrzmq_read_msg, cbp, fd,
- &cb->read.thread);
- } else
- thread_add_read(master, frrzmq_read_msg, cbp, fd,
+ event_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
+ } else
+ event_add_read(master, frrzmq_read_msg, cbp, fd,
+ &cb->read.thread);
return 0;
}
if (written)
frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN);
- thread_add_write(t->master, frrzmq_write_msg, cbp,
- cb->fd, &cb->write.thread);
+ event_add_write(t->master, frrzmq_write_msg, cbp, cb->fd,
+ &cb->write.thread);
return;
out_err:
cb->write.cb_error(cb->write.arg, cb->zmqsock);
}
-int _frrzmq_thread_add_write(const struct xref_threadsched *xref,
- struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*errfunc)(void *arg, void *zmqsock),
- void *arg, void *zmqsock, struct frrzmq_cb **cbp)
+int _frrzmq_event_add_write(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock, struct frrzmq_cb **cbp)
{
int fd, events;
size_t len;
if (events & ZMQ_POLLOUT) {
thread_cancel(&cb->write.thread);
- _thread_add_event(xref, master, frrzmq_write_msg, cbp, fd,
- &cb->write.thread);
- } else
- thread_add_write(master, frrzmq_write_msg, cbp, fd,
+ _event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
+ } else
+ event_add_write(master, frrzmq_write_msg, cbp, fd,
+ &cb->write.thread);
return 0;
}
thread_cancel(&core->thread);
if (event == ZMQ_POLLIN)
- thread_add_event(tm, frrzmq_read_msg,
- cbp, cb->fd, &core->thread);
+ event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
+ &core->thread);
else
- thread_add_event(tm, frrzmq_write_msg,
- cbp, cb->fd, &core->thread);
+ event_add_event(tm, frrzmq_write_msg, cbp, cb->fd,
+ &core->thread);
}
}
/* end */
/* core event registration, one of these 2 macros should be used */
-#define frrzmq_thread_add_read_msg(m, f, e, a, z, d) \
+#define frrzmq_event_add_read_msg(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
- _frrzmq_thread_add_read(&_xref, m, f, NULL, e, a, z, d))
+ _frrzmq_event_add_read(&_xref, m, f, NULL, e, a, z, d))
-#define frrzmq_thread_add_read_part(m, f, e, a, z, d) \
+#define frrzmq_event_add_read_part(m, f, e, a, z, d) \
_xref_zmq_a(READ, f, d, \
- _frrzmq_thread_add_read(&_xref, m, NULL, f, e, a, z, d))
+ _frrzmq_event_add_read(&_xref, m, NULL, f, e, a, z, d))
-#define frrzmq_thread_add_write_msg(m, f, e, a, z, d) \
+#define frrzmq_event_add_write_msg(m, f, e, a, z, d) \
_xref_zmq_a(WRITE, f, d, \
- _frrzmq_thread_add_write(&_xref, m, f, e, a, z, d))
+ _frrzmq_event_add_write(&_xref, m, f, e, a, z, d))
struct cb_core;
struct frrzmq_cb;
* may schedule the event to run as soon as libfrr is back in its main
* loop.
*/
-extern int _frrzmq_thread_add_read(
- const struct xref_threadsched *xref, struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*partfunc)(void *arg, void *zmqsock, zmq_msg_t *msg,
- unsigned partnum),
- void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb);
-extern int _frrzmq_thread_add_write(
- const struct xref_threadsched *xref, struct thread_master *master,
- void (*msgfunc)(void *arg, void *zmqsock),
- void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock,
- struct frrzmq_cb **cb);
+extern int
+_frrzmq_event_add_read(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*partfunc)(void *arg, void *zmqsock,
+ zmq_msg_t *msg, unsigned partnum),
+ void (*errfunc)(void *arg, void *zmqsock), void *arg,
+ void *zmqsock, struct frrzmq_cb **cb);
+extern int _frrzmq_event_add_write(const struct xref_threadsched *xref,
+ struct thread_master *master,
+ void (*msgfunc)(void *arg, void *zmqsock),
+ void (*errfunc)(void *arg, void *zmqsock),
+ void *arg, void *zmqsock,
+ struct frrzmq_cb **cb);
extern void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core);
exit(0);
}
- thread_add_event(master, frr_config_read_in, NULL, 0,
- &di->read_in);
+ event_add_event(master, frr_config_read_in, NULL, 0,
+ &di->read_in);
}
if (di->daemon_mode || di->terminal)
}
out:
- thread_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
- &daemon_ctl_thread);
+ event_add_read(master, frr_daemon_ctl, NULL, daemon_ctl_sock,
+ &daemon_ctl_thread);
}
void frr_detach(void)
vty_stdio(frr_terminal_close);
if (daemon_ctl_sock != -1) {
set_nonblocking(daemon_ctl_sock);
- thread_add_read(master, frr_daemon_ctl, NULL,
- daemon_ctl_sock, &daemon_ctl_thread);
+ event_add_read(master, frr_daemon_ctl, NULL,
+ daemon_ctl_sock, &daemon_ctl_thread);
}
} else if (di->daemon_mode) {
int nullfd = open("/dev/null", O_RDONLY | O_NOCTTY);
switch (event) {
case MGMTD_BE_CONN_READ:
- thread_add_read(client_ctx->tm, mgmt_be_client_read,
+ event_add_read(client_ctx->tm, mgmt_be_client_read,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_read_ev);
assert(client_ctx->conn_read_ev);
break;
case MGMTD_BE_CONN_WRITE:
- thread_add_write(client_ctx->tm, mgmt_be_client_write,
+ event_add_write(client_ctx->tm, mgmt_be_client_write,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_write_ev);
assert(client_ctx->conn_write_ev);
break;
case MGMTD_BE_PROC_MSG:
tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
- thread_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
+ event_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
client_ctx, &tv, &client_ctx->msg_proc_ev);
assert(client_ctx->msg_proc_ev);
break;
case MGMTD_BE_CONN_WRITES_ON:
- thread_add_timer_msec(client_ctx->tm,
+ event_add_timer_msec(client_ctx->tm,
mgmt_be_client_resume_writes, client_ctx,
MGMTD_BE_MSG_WRITE_DELAY_MSEC,
&client_ctx->conn_writes_on);
MGMTD_BE_CLIENT_DBG(
"Scheduling MGMTD Backend server connection retry after %lu seconds",
intvl_secs);
- thread_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
+ event_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
(void *)client_ctx, intvl_secs,
&client_ctx->conn_retry_tmr);
}
switch (event) {
case MGMTD_FE_CONN_READ:
- thread_add_read(client_ctx->tm, mgmt_fe_client_read,
+ event_add_read(client_ctx->tm, mgmt_fe_client_read,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_read_ev);
assert(client_ctx->conn_read_ev);
break;
case MGMTD_FE_CONN_WRITE:
- thread_add_write(client_ctx->tm, mgmt_fe_client_write,
+ event_add_write(client_ctx->tm, mgmt_fe_client_write,
client_ctx, client_ctx->conn_fd,
&client_ctx->conn_write_ev);
assert(client_ctx->conn_write_ev);
break;
case MGMTD_FE_PROC_MSG:
tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
- thread_add_timer_tv(client_ctx->tm,
+ event_add_timer_tv(client_ctx->tm,
mgmt_fe_client_proc_msgbufs, client_ctx,
&tv, &client_ctx->msg_proc_ev);
assert(client_ctx->msg_proc_ev);
break;
case MGMTD_FE_CONN_WRITES_ON:
- thread_add_timer_msec(
+ event_add_timer_msec(
client_ctx->tm, mgmt_fe_client_resume_writes,
client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
&client_ctx->conn_writes_on);
MGMTD_FE_CLIENT_DBG(
"Scheduling MGMTD Frontend server connection retry after %lu seconds",
intvl_secs);
- thread_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
+ event_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
(void *)client_ctx, intvl_secs,
&client_ctx->conn_retry_tmr);
}
confirmed_timeout);
thread_cancel(&vty->t_confirmed_commit_timeout);
- thread_add_timer(master,
- nb_cli_confirmed_commit_timeout, vty,
- confirmed_timeout * 60,
- &vty->t_confirmed_commit_timeout);
+ event_add_timer(master, nb_cli_confirmed_commit_timeout,
+ vty, confirmed_timeout * 60,
+ &vty->t_confirmed_commit_timeout);
} else {
/* Accept commit confirmation. */
vty_out(vty, "%% Commit complete.\n\n");
vty->confirmed_commit_rollback = nb_config_dup(running_config);
vty->t_confirmed_commit_timeout = NULL;
- thread_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
- confirmed_timeout * 60,
- &vty->t_confirmed_commit_timeout);
+ event_add_timer(master, nb_cli_confirmed_commit_timeout, vty,
+ confirmed_timeout * 60,
+ &vty->t_confirmed_commit_timeout);
}
context.client = NB_CLIENT_CLI;
int *subp = NULL;
int reslen = 0;
- thread_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);
+ event_add_read(master, frr_confd_cdb_read_cb, NULL, fd, &t_cdb_sub);
if (cdb_read_subscription_socket2(fd, &cdb_ev, &flags, &subp, &reslen)
!= CONFD_OK) {
}
pthread_detach(cdb_trigger_thread);
- thread_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
- &t_cdb_sub);
+ event_add_read(master, frr_confd_cdb_read_cb, NULL, cdb_sub_sock,
+ &t_cdb_sub);
return 0;
struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
int fd = THREAD_FD(thread);
- thread_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);
+ event_add_read(master, frr_confd_dp_ctl_read, dctx, fd, &t_dp_ctl);
frr_confd_dp_read(dctx, fd);
}
struct confd_daemon_ctx *dctx = THREAD_ARG(thread);
int fd = THREAD_FD(thread);
- thread_add_read(master, frr_confd_dp_worker_read, dctx, fd, &t_dp_worker);
+ event_add_read(master, frr_confd_dp_worker_read, dctx, fd,
+ &t_dp_worker);
frr_confd_dp_read(dctx, fd);
}
goto error;
}
- thread_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
- &t_dp_ctl);
- thread_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
- &t_dp_worker);
+ event_add_read(master, frr_confd_dp_ctl_read, dctx, dp_ctl_sock,
+ &t_dp_ctl);
+ event_add_read(master, frr_confd_dp_worker_read, dctx, dp_worker_sock,
+ &t_dp_worker);
return 0;
* state will either be MORE or FINISH. It will always be FINISH
* for Unary RPCs.
*/
- thread_add_event(main_master, c_callback, (void *)this, 0,
- NULL);
+ event_add_event(main_master, c_callback, (void *)this, 0, NULL);
pthread_mutex_lock(&this->cmux);
while (this->state == PROCESS)
{
main_master = tm;
hook_register(frr_fini, frr_grpc_finish);
- thread_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
+ event_add_event(tm, frr_grpc_module_very_late_init, NULL, 0, NULL);
return 0;
}
return;
}
- thread_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
+ event_add_read(master, frr_sr_read_cb, module, fd, &module->sr_thread);
}
static void frr_sr_subscribe_config(struct yang_module *module)
sr_strerror(ret));
goto cleanup;
}
- thread_add_read(master, frr_sr_read_cb, module,
- event_pipe, &module->sr_thread);
+ event_add_read(master, frr_sr_read_cb, module, event_pipe,
+ &module->sr_thread);
}
hook_register(nb_notification_send, frr_sr_notification_send);
if (pullwr->writer)
return;
- thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
+ event_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
}
static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
if (pullwr->valid == 0) {
/* we made a fill() call above that didn't feed any
* data in, and we have nothing more queued, so we go
- * into idle, i.e. no calling thread_add_write()
+ * into idle, i.e. no calling event_add_write()
*/
pullwr_resize(pullwr, 0);
return;
* is full and we go wait until it's available for writing again.
*/
- thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
+ event_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
&pullwr->writer);
/* if we hit the time limit, just keep the buffer, we'll probably need
struct resolver_fd *resfd = THREAD_ARG(t);
struct resolver_state *r = resfd->state;
- thread_add_read(r->master, resolver_cb_socket_readable, resfd,
- resfd->fd, &resfd->t_read);
+ event_add_read(r->master, resolver_cb_socket_readable, resfd, resfd->fd,
+ &resfd->t_read);
/* ^ ordering important:
* ares_process_fd may transitively call THREAD_OFF(resfd->t_read)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
struct resolver_fd *resfd = THREAD_ARG(t);
struct resolver_state *r = resfd->state;
- thread_add_write(r->master, resolver_cb_socket_writable, resfd,
- resfd->fd, &resfd->t_write);
+ event_add_write(r->master, resolver_cb_socket_writable, resfd,
+ resfd->fd, &resfd->t_write);
/* ^ ordering important:
* ares_process_fd may transitively call THREAD_OFF(resfd->t_write)
* combined with resolver_fd_drop_maybe, so resfd may be free'd after!
if (tv) {
unsigned int timeoutms = tv->tv_sec * 1000 + tv->tv_usec / 1000;
- thread_add_timer_msec(r->master, resolver_cb_timeout, r,
- timeoutms, &r->timeout);
+ event_add_timer_msec(r->master, resolver_cb_timeout, r,
+ timeoutms, &r->timeout);
}
}
if (!readable)
THREAD_OFF(resfd->t_read);
else if (!resfd->t_read)
- thread_add_read(r->master, resolver_cb_socket_readable, resfd,
- fd, &resfd->t_read);
+ event_add_read(r->master, resolver_cb_socket_readable, resfd,
+ fd, &resfd->t_read);
if (!writable)
THREAD_OFF(resfd->t_write);
else if (!resfd->t_write)
- thread_add_write(r->master, resolver_cb_socket_writable, resfd,
- fd, &resfd->t_write);
+ event_add_write(r->master, resolver_cb_socket_writable, resfd,
+ fd, &resfd->t_write);
resolver_fd_drop_maybe(resfd);
}
/* for consistency with proper name lookup, don't call the
* callback immediately; defer to thread loop
*/
- thread_add_timer_msec(state.master, resolver_cb_literal,
- query, 0, &query->literal_cb);
+ event_add_timer_msec(state.master, resolver_cb_literal, query,
+ 0, &query->literal_cb);
return;
}
sigm = THREAD_ARG(t);
sigm->t = NULL;
- thread_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
- FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
+ event_add_timer(sigm->t->master, frr_signal_timer, &sigmaster,
+ FRR_SIGNAL_TIMER_INTERVAL, &sigm->t);
frr_sigevent_process();
}
#endif /* SIGEVENT_SCHEDULE_THREAD */
#ifdef SIGEVENT_SCHEDULE_THREAD
sigmaster.t = NULL;
- thread_add_timer(m, frr_signal_timer, &sigmaster,
- FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
+ event_add_timer(m, frr_signal_timer, &sigmaster,
+ FRR_SIGNAL_TIMER_INTERVAL, &sigmaster.t);
#endif /* SIGEVENT_SCHEDULE_THREAD */
}
switch (backoff->state) {
case SPF_BACKOFF_QUIET:
backoff->state = SPF_BACKOFF_SHORT_WAIT;
- thread_add_timer_msec(
+ event_add_timer_msec(
backoff->m, spf_backoff_timetolearn_elapsed, backoff,
backoff->timetolearn, &backoff->t_timetolearn);
- thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
- backoff, backoff->holddown,
- &backoff->t_holddown);
+ event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
+ backoff, backoff->holddown,
+ &backoff->t_holddown);
backoff->first_event_time = now;
rv = backoff->init_delay;
break;
case SPF_BACKOFF_SHORT_WAIT:
case SPF_BACKOFF_LONG_WAIT:
thread_cancel(&backoff->t_holddown);
- thread_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
- backoff, backoff->holddown,
- &backoff->t_holddown);
+ event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
+ backoff, backoff->holddown,
+ &backoff->t_holddown);
if (backoff->state == SPF_BACKOFF_SHORT_WAIT)
rv = backoff->short_delay;
else
systemd_send_information("WATCHDOG=1");
assert(watchdog_msec > 0);
- thread_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
- watchdog_msec, NULL);
+ event_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
+ watchdog_msec, NULL);
}
void systemd_send_started(struct thread_master *m)
{
switch (event) {
case VTY_SERV:
- thread_add_read(vty_master, vty_accept, vty_serv,
- vty_serv->sock, &vty_serv->t_accept);
+ event_add_read(vty_master, vty_accept, vty_serv, vty_serv->sock,
+ &vty_serv->t_accept);
break;
#ifdef VTYSH
case VTYSH_SERV:
- thread_add_read(vty_master, vtysh_accept, vty_serv,
- vty_serv->sock, &vty_serv->t_accept);
+ event_add_read(vty_master, vtysh_accept, vty_serv,
+ vty_serv->sock, &vty_serv->t_accept);
break;
#endif /* VTYSH */
case VTY_READ:
switch (event) {
#ifdef VTYSH
case VTYSH_READ:
- thread_add_read(vty_master, vtysh_read, vty, vty->fd,
- &vty->t_read);
+ event_add_read(vty_master, vtysh_read, vty, vty->fd,
+ &vty->t_read);
break;
case VTYSH_WRITE:
- thread_add_write(vty_master, vtysh_write, vty, vty->wfd,
- &vty->t_write);
+ event_add_write(vty_master, vtysh_write, vty, vty->wfd,
+ &vty->t_write);
break;
#endif /* VTYSH */
case VTY_READ:
- thread_add_read(vty_master, vty_read, vty, vty->fd,
- &vty->t_read);
+ event_add_read(vty_master, vty_read, vty, vty->fd,
+ &vty->t_read);
/* Time out treatment. */
if (vty->v_timeout) {
THREAD_OFF(vty->t_timeout);
- thread_add_timer(vty_master, vty_timeout, vty,
- vty->v_timeout, &vty->t_timeout);
+ event_add_timer(vty_master, vty_timeout, vty,
+ vty->v_timeout, &vty->t_timeout);
}
break;
case VTY_WRITE:
- thread_add_write(vty_master, vty_flush, vty, vty->wfd,
- &vty->t_write);
+ event_add_write(vty_master, vty_flush, vty, vty->wfd,
+ &vty->t_write);
break;
case VTY_TIMEOUT_RESET:
THREAD_OFF(vty->t_timeout);
if (vty->v_timeout)
- thread_add_timer(vty_master, vty_timeout, vty,
- vty->v_timeout, &vty->t_timeout);
+ event_add_timer(vty_master, vty_timeout, vty,
+ vty->v_timeout, &vty->t_timeout);
break;
case VTY_SERV:
case VTYSH_SERV:
slots_to_skip++;
wheel->slots_to_skip = slots_to_skip;
- thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
- wheel->nexttime * slots_to_skip, &wheel->timer);
+ event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
+ wheel->nexttime * slots_to_skip, &wheel->timer);
}
static void wheel_timer_thread(struct event *t)
for (i = 0; i < slots; i++)
wheel->wheel_slot_lists[i] = list_new();
- thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
- wheel->nexttime, &wheel->timer);
+ event_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
+ wheel->nexttime, &wheel->timer);
return wheel;
}
* as an 'event'
*/
if (delay > 0) {
- thread_add_timer_msec(wq->master, work_queue_run, wq,
- delay, &wq->thread);
+ event_add_timer_msec(wq->master, work_queue_run, wq,
+ delay, &wq->thread);
thread_ignore_late_timer(wq->thread);
} else
- thread_add_event(wq->master, work_queue_run, wq, 0,
- &wq->thread);
+ event_add_event(wq->master, work_queue_run, wq, 0,
+ &wq->thread);
/* set thread yield time, if needed */
if (thread_is_scheduled(wq->thread) &&
return;
case BUFFER_PENDING:
zclient->t_write = NULL;
- thread_add_write(zclient->master, zclient_flush_data, zclient,
- zclient->sock, &zclient->t_write);
+ event_add_write(zclient->master, zclient_flush_data, zclient,
+ zclient->sock, &zclient->t_write);
break;
case BUFFER_EMPTY:
if (zclient->zebra_buffer_write_ready)
THREAD_OFF(zclient->t_write);
return ZCLIENT_SEND_SUCCESS;
case BUFFER_PENDING:
- thread_add_write(zclient->master, zclient_flush_data, zclient,
- zclient->sock, &zclient->t_write);
+ event_add_write(zclient->master, zclient_flush_data, zclient,
+ zclient->sock, &zclient->t_write);
return ZCLIENT_SEND_BUFFERED;
}
{
switch (event) {
case ZCLIENT_SCHEDULE:
- thread_add_event(zclient->master, zclient_connect, zclient, 0,
- &zclient->t_connect);
+ event_add_event(zclient->master, zclient_connect, zclient, 0,
+ &zclient->t_connect);
break;
case ZCLIENT_CONNECT:
if (zclient_debug)
zlog_debug(
"zclient connect failures: %d schedule interval is now %d",
zclient->fail, zclient->fail < 3 ? 10 : 60);
- thread_add_timer(zclient->master, zclient_connect, zclient,
- zclient->fail < 3 ? 10 : 60,
- &zclient->t_connect);
+ event_add_timer(zclient->master, zclient_connect, zclient,
+ zclient->fail < 3 ? 10 : 60,
+ &zclient->t_connect);
break;
case ZCLIENT_READ:
zclient->t_read = NULL;
- thread_add_read(zclient->master, zclient_read, zclient,
- zclient->sock, &zclient->t_read);
+ event_add_read(zclient->master, zclient_read, zclient,
+ zclient->sock, &zclient->t_read);
break;
}
}
ret = read(fd, dummy, sizeof(dummy));
if (ret > 0) {
/* logger is sending us something?!?! */
- thread_add_read(t->master, zlog_5424_reconnect, zcf, fd,
- &zcf->t_reconnect);
+ event_add_read(t->master, zlog_5424_reconnect, zcf, fd,
+ &zcf->t_reconnect);
return;
}
assert(zcf->master);
if (fd != -1) {
- thread_add_read(zcf->master, zlog_5424_reconnect, zcf,
- fd, &zcf->t_reconnect);
+ event_add_read(zcf->master, zlog_5424_reconnect, zcf,
+ fd, &zcf->t_reconnect);
zcf->reconn_backoff_cur = zcf->reconn_backoff;
} else {
- thread_add_timer_msec(zcf->master, zlog_5424_reconnect,
- zcf, zcf->reconn_backoff_cur,
- &zcf->t_reconnect);
+ event_add_timer_msec(zcf->master, zlog_5424_reconnect,
+ zcf, zcf->reconn_backoff_cur,
+ &zcf->t_reconnect);
zcf->reconn_backoff_cur += zcf->reconn_backoff_cur / 2;
if (zcf->reconn_backoff_cur > zcf->reconn_backoff_max)
switch (event) {
case MGMTD_BE_CONN_INIT:
- thread_add_timer_msec(mgmt_be_adapter_tm,
+ event_add_timer_msec(mgmt_be_adapter_tm,
mgmt_be_adapter_conn_init, adapter,
MGMTD_BE_CONN_INIT_DELAY_MSEC,
&adapter->conn_init_ev);
assert(adapter->conn_init_ev);
break;
case MGMTD_BE_CONN_READ:
- thread_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
+ event_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
adapter, adapter->conn_fd, &adapter->conn_read_ev);
assert(adapter->conn_read_ev);
break;
MGMTD_BE_ADAPTER_DBG(
"scheduling write ready notify for client %s",
adapter->name);
- thread_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
+ event_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
adapter, adapter->conn_fd, &adapter->conn_write_ev);
assert(adapter->conn_write_ev);
break;
case MGMTD_BE_PROC_MSG:
tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
- thread_add_timer_tv(mgmt_be_adapter_tm,
+ event_add_timer_tv(mgmt_be_adapter_tm,
mgmt_be_adapter_proc_msgbufs, adapter, &tv,
&adapter->proc_msg_ev);
assert(adapter->proc_msg_ev);
break;
case MGMTD_BE_CONN_WRITES_ON:
- thread_add_timer_msec(mgmt_be_adapter_tm,
+ event_add_timer_msec(mgmt_be_adapter_tm,
mgmt_be_adapter_resume_writes, adapter,
MGMTD_BE_MSG_WRITE_DELAY_MSEC,
&adapter->conn_writes_on);
static void mgmt_be_server_register_event(enum mgmt_be_event event)
{
if (event == MGMTD_BE_SERVER) {
- thread_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
+ event_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
NULL, mgmt_be_listen_fd,
&mgmt_be_listen_ev);
assert(mgmt_be_listen_ev);
switch (event) {
case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
- thread_add_timer_tv(mgmt_fe_adapter_tm,
+ event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_session_cfg_txn_clnup, session,
&tv, &session->proc_cfg_txn_clnp);
assert(session->proc_cfg_txn_clnp);
break;
case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
- thread_add_timer_tv(mgmt_fe_adapter_tm,
+ event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_session_show_txn_clnup, session,
&tv, &session->proc_show_txn_clnp);
assert(session->proc_show_txn_clnp);
switch (event) {
case MGMTD_FE_CONN_READ:
- thread_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
+ event_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
adapter, adapter->conn_fd, &adapter->conn_read_ev);
assert(adapter->conn_read_ev);
break;
case MGMTD_FE_CONN_WRITE:
- thread_add_write(mgmt_fe_adapter_tm,
+ event_add_write(mgmt_fe_adapter_tm,
mgmt_fe_adapter_write, adapter,
adapter->conn_fd, &adapter->conn_write_ev);
assert(adapter->conn_write_ev);
break;
case MGMTD_FE_PROC_MSG:
tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
- thread_add_timer_tv(mgmt_fe_adapter_tm,
+ event_add_timer_tv(mgmt_fe_adapter_tm,
mgmt_fe_adapter_proc_msgbufs, adapter,
&tv, &adapter->proc_msg_ev);
assert(adapter->proc_msg_ev);
break;
case MGMTD_FE_CONN_WRITES_ON:
- thread_add_timer_msec(mgmt_fe_adapter_tm,
+ event_add_timer_msec(mgmt_fe_adapter_tm,
mgmt_fe_adapter_resume_writes, adapter,
MGMTD_FE_MSG_WRITE_DELAY_MSEC,
&adapter->conn_writes_on);
static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
{
if (event == MGMTD_FE_SERVER) {
- thread_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
+ event_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
NULL, mgmt_fe_listen_fd,
&mgmt_fe_listen_ev);
assert(mgmt_fe_listen_ev);
switch (event) {
case MGMTD_TXN_PROC_SETCFG:
- thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
txn, &tv, &txn->proc_set_cfg);
assert(txn->proc_set_cfg);
break;
case MGMTD_TXN_PROC_COMMITCFG:
- thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
txn, &tv, &txn->proc_comm_cfg);
assert(txn->proc_comm_cfg);
break;
case MGMTD_TXN_PROC_GETCFG:
- thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
txn, &tv, &txn->proc_get_cfg);
assert(txn->proc_get_cfg);
break;
case MGMTD_TXN_PROC_GETDATA:
- thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
txn, &tv, &txn->proc_get_data);
assert(txn->proc_get_data);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
- thread_add_timer_msec(mgmt_txn_tm,
+ event_add_timer_msec(mgmt_txn_tm,
mgmt_txn_cfg_commit_timedout, txn,
MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
&txn->comm_cfg_timeout);
break;
case MGMTD_TXN_CLEANUP:
tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
- thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
&txn->clnup);
assert(txn->clnup);
}
}
}
- thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
- &netlink_log_thread);
+ event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
+ &netlink_log_thread);
}
void netlink_set_nflog_group(int nlgroup)
return;
netlink_log_register(netlink_log_fd, nlgroup);
- thread_add_read(master, netlink_log_recv, 0, netlink_log_fd,
- &netlink_log_thread);
+ event_add_read(master, netlink_log_recv, 0, netlink_log_fd,
+ &netlink_log_thread);
}
}
switch (c->cur.type) {
case NHRP_CACHE_INVALID:
if (!c->t_auth)
- thread_add_timer_msec(master, nhrp_cache_do_free, c, 10,
- &c->t_timeout);
+ event_add_timer_msec(master, nhrp_cache_do_free, c, 10,
+ &c->t_timeout);
break;
case NHRP_CACHE_INCOMPLETE:
case NHRP_CACHE_NEGATIVE:
case NHRP_CACHE_LOCAL:
case NHRP_CACHE_NUM_TYPES:
if (c->cur.expires)
- thread_add_timer(master, nhrp_cache_do_timeout, c,
- c->cur.expires - monotime(NULL),
- &c->t_timeout);
+ event_add_timer(master, nhrp_cache_do_timeout, c,
+ c->cur.expires - monotime(NULL),
+ &c->t_timeout);
break;
}
}
if (nhrp_peer_check(c->new.peer, 1)) {
evmgr_notify("authorize-binding", c,
nhrp_cache_authorize_binding);
- thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
- 10, &c->t_auth);
+ event_add_timer(master, nhrp_cache_do_auth_timeout, c,
+ 10, &c->t_auth);
}
break;
case NOTIFY_PEER_DOWN:
nhrp_cache_newpeer_notifier);
nhrp_cache_newpeer_notifier(&c->newpeer_notifier,
NOTIFY_PEER_UP);
- thread_add_timer(master, nhrp_cache_do_auth_timeout, c,
- 60, &c->t_auth);
+ event_add_timer(master, nhrp_cache_do_auth_timeout, c,
+ 60, &c->t_auth);
}
}
nhrp_cache_update_timers(c);
close(evmgr->fd);
evmgr->fd = -1;
if (nhrp_event_socket_path)
- thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
}
static void evmgr_recv_message(struct event_manager *evmgr, struct zbuf *zb)
while (zbuf_may_pull_until(ibuf, "\n\n", &msg))
evmgr_recv_message(evmgr, &msg);
- thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
+ event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}
static void evmgr_write(struct event *t)
r = zbufq_write(&evmgr->obuf, evmgr->fd);
if (r > 0) {
- thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
- &evmgr->t_write);
+ event_add_write(master, evmgr_write, evmgr, evmgr->fd,
+ &evmgr->t_write);
} else if (r < 0) {
evmgr_connection_error(evmgr);
}
zbuf_put(obuf, "\n", 1);
zbufq_queue(&evmgr->obuf, obuf);
if (evmgr->fd >= 0)
- thread_add_write(master, evmgr_write, evmgr, evmgr->fd,
- &evmgr->t_write);
+ event_add_write(master, evmgr_write, evmgr, evmgr->fd,
+ &evmgr->t_write);
}
static void evmgr_reconnect(struct event *t)
zlog_warn("%s: failure connecting nhrp-event socket: %s",
__func__, strerror(errno));
zbufq_reset(&evmgr->obuf);
- thread_add_timer(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
return;
}
zlog_info("Connected to Event Manager");
evmgr->fd = fd;
- thread_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
+ event_add_read(master, evmgr_read, evmgr, evmgr->fd, &evmgr->t_read);
}
static struct event_manager evmgr_connection;
evmgr->fd = -1;
zbuf_init(&evmgr->ibuf, evmgr->ibuf_data, sizeof(evmgr->ibuf_data), 0);
zbufq_init(&evmgr->obuf);
- thread_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
- &evmgr->t_reconnect);
+ event_add_timer_msec(master, evmgr_reconnect, evmgr, 10,
+ &evmgr->t_reconnect);
}
void evmgr_set_socket(const char *socket)
}
}
- thread_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
- &netlink_mcast_log_thread);
+ event_add_read(master, netlink_mcast_log_recv, 0, netlink_mcast_log_fd,
+ &netlink_mcast_log_thread);
}
static void netlink_mcast_log_register(int fd, int group)
return;
netlink_mcast_log_register(netlink_mcast_log_fd, nlgroup);
- thread_add_read(master, netlink_mcast_log_recv, 0,
- netlink_mcast_log_fd,
- &netlink_mcast_log_thread);
+ event_add_read(master, netlink_mcast_log_recv, 0,
+ netlink_mcast_log_fd, &netlink_mcast_log_thread);
debugf(NHRP_DEBUG_COMMON, "Register nflog group: %d",
netlink_mcast_nflog_group);
}
/* RFC 2332 5.2.3 - Registration is recommend to be renewed
* every one third of holdtime */
- thread_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_send_req, r, holdtime / 3,
+ &r->t_register);
r->proto_addr = p->dst_proto;
c = nhrp_cache_get(ifp, &p->dst_proto, 1);
}
r->timeout = 2;
}
- thread_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, r, 10, &r->t_register);
}
static void nhrp_reg_peer_notify(struct notifier_block *n, unsigned long cmd)
debugf(NHRP_DEBUG_COMMON, "NHS: Flush timer for %pSU",
&r->peer->vc->remote.nbma);
THREAD_OFF(r->t_register);
- thread_add_timer_msec(master, nhrp_reg_send_req, r, 10,
- &r->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, r, 10,
+ &r->t_register);
break;
}
}
if (!nhrp_peer_check(r->peer, 2)) {
debugf(NHRP_DEBUG_COMMON, "NHS: Waiting link for %pSU",
&r->peer->vc->remote.nbma);
- thread_add_timer(master, nhrp_reg_send_req, r, 120,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_send_req, r, 120,
+ &r->t_register);
return;
}
- thread_add_timer(master, nhrp_reg_timeout, r, r->timeout,
- &r->t_register);
+ event_add_timer(master, nhrp_reg_timeout, r, r->timeout,
+ &r->t_register);
/* RFC2332 5.2.3 NHC uses it's own address as dst if NHS is unknown */
dst_proto = &nhs->proto_addr;
if (n < 0) {
/* Failed, retry in a moment */
- thread_add_timer(master, nhrp_nhs_resolve, nhs, 5,
- &nhs->t_resolve);
+ event_add_timer(master, nhrp_nhs_resolve, nhs, 5,
+ &nhs->t_resolve);
return;
}
- thread_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
- &nhs->t_resolve);
+ event_add_timer(master, nhrp_nhs_resolve, nhs, 2 * 60 * 60,
+ &nhs->t_resolve);
frr_each (nhrp_reglist, &nhs->reglist_head, reg)
reg->mark = 1;
nhrp_reglist_add_tail(&nhs->reglist_head, reg);
nhrp_peer_notify_add(reg->peer, &reg->peer_notifier,
nhrp_reg_peer_notify);
- thread_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
- &reg->t_register);
+ event_add_timer_msec(master, nhrp_reg_send_req, reg, 50,
+ &reg->t_register);
}
frr_each_safe (nhrp_reglist, &nhs->reglist_head, reg)
.reglist_head = INIT_DLIST(nhs->reglist_head),
};
nhrp_nhslist_add_tail(&nifp->afi[afi].nhslist_head, nhs);
- thread_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
- &nhs->t_resolve);
+ event_add_timer_msec(master, nhrp_nhs_resolve, nhs, 1000,
+ &nhs->t_resolve);
return NHRP_OK;
}
uint8_t addr[64];
size_t len, addrlen;
- thread_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);
+ event_add_read(master, nhrp_packet_recvraw, 0, fd, NULL);
zb = zbuf_alloc(1500);
if (!zb)
int nhrp_packet_init(void)
{
- thread_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
+ event_add_read(master, nhrp_packet_recvraw, 0, os_socket(), NULL);
return 0;
}
* the up notification a bit to allow things
* settle down. This allows IKE to install
* SPDs and SAs. */
- thread_add_timer_msec(master, nhrp_peer_notify_up, p,
- 50, &p->t_fallback);
+ event_add_timer_msec(master, nhrp_peer_notify_up, p, 50,
+ &p->t_fallback);
} else {
nhrp_peer_ref(p);
p->online = online;
p->fallback_requested = 1;
vici_request_vc(nifp->ipsec_fallback_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(master, nhrp_peer_request_timeout, p, 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p, 30,
+ &p->t_fallback);
} else {
p->requested = p->fallback_requested = 0;
}
} else {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(
- master, nhrp_peer_request_timeout, p,
- (nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p,
+ (nifp->ipsec_fallback_profile && !p->prio) ? 15
+ : 30,
+ &p->t_fallback);
}
}
if (p->prio) {
vici_request_vc(nifp->ipsec_profile, &vc->local.nbma,
&vc->remote.nbma, p->prio);
- thread_add_timer(
- master, nhrp_peer_request_timeout, p,
- (nifp->ipsec_fallback_profile && !p->prio) ? 15 : 30,
- &p->t_fallback);
+ event_add_timer(master, nhrp_peer_request_timeout, p,
+ (nifp->ipsec_fallback_profile && !p->prio) ? 15
+ : 30,
+ &p->t_fallback);
} else {
/* Maximum timeout is 1 second */
int r_time_ms = frr_weak_random() % 1000;
debugf(NHRP_DEBUG_COMMON,
"Initiating IPsec connection request to %pSU after %d ms:",
&vc->remote.nbma, r_time_ms);
- thread_add_timer_msec(master, nhrp_peer_defer_vici_request,
- p, r_time_ms, &p->t_timer);
+ event_add_timer_msec(master, nhrp_peer_defer_vici_request, p,
+ r_time_ms, &p->t_timer);
}
return 0;
{
struct nhrp_shortcut *s = THREAD_ARG(t);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, s->holding_time / 3,
- &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, s->holding_time / 3,
+ &s->t_timer);
s->expiring = 1;
nhrp_shortcut_check_use(s);
}
if (holding_time) {
s->expiring = 0;
s->holding_time = holding_time;
- thread_add_timer(master, nhrp_shortcut_do_expire, s,
- 2 * holding_time / 3, &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_expire, s,
+ 2 * holding_time / 3, &s->t_timer);
}
}
nhrp_reqid_free(&nhrp_packet_reqid, &s->reqid);
THREAD_OFF(s->t_timer);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, 1, &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, 1, &s->t_timer);
if (pp->hdr->type != NHRP_PACKET_RESOLUTION_REPLY) {
if (pp->hdr->type == NHRP_PACKET_ERROR_INDICATION
if (s && s->type != NHRP_CACHE_INCOMPLETE) {
s->addr = *addr;
THREAD_OFF(s->t_timer);
- thread_add_timer(master, nhrp_shortcut_do_purge, s, 30,
- &s->t_timer);
+ event_add_timer(master, nhrp_shortcut_do_purge, s, 30,
+ &s->t_timer);
nhrp_shortcut_send_resolution_req(s);
}
}
if (force) {
/* Immediate purge on route with draw or pending shortcut */
- thread_add_timer_msec(master, nhrp_shortcut_do_purge, s, 5,
- &s->t_timer);
+ event_add_timer_msec(master, nhrp_shortcut_do_purge, s, 5,
+ &s->t_timer);
} else {
/* Soft expire - force immediate renewal, but purge
* in few seconds to make sure stale route is not
* This allows to keep nhrp route up, and to not
* cause temporary rerouting via hubs causing latency
* jitter. */
- thread_add_timer_msec(master, nhrp_shortcut_do_purge, s, 3000,
- &s->t_timer);
+ event_add_timer_msec(master, nhrp_shortcut_do_purge, s, 3000,
+ &s->t_timer);
s->expiring = 1;
nhrp_shortcut_check_use(s);
}
close(vici->fd);
vici->fd = -1;
- thread_add_timer(master, vici_reconnect, vici, 2, &vici->t_reconnect);
+ event_add_timer(master, vici_reconnect, vici, 2, &vici->t_reconnect);
}
static void vici_parse_message(struct vici_conn *vici, struct zbuf *msg,
vici_recv_message(vici, &pktbuf);
} while (1);
- thread_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
+ event_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
}
static void vici_write(struct event *t)
r = zbufq_write(&vici->obuf, vici->fd);
if (r > 0) {
- thread_add_write(master, vici_write, vici, vici->fd,
- &vici->t_write);
+ event_add_write(master, vici_write, vici, vici->fd,
+ &vici->t_write);
} else if (r < 0) {
vici_connection_error(vici);
}
}
zbufq_queue(&vici->obuf, obuf);
- thread_add_write(master, vici_write, vici, vici->fd, &vici->t_write);
+ event_add_write(master, vici_write, vici, vici->fd, &vici->t_write);
}
static void vici_submit_request(struct vici_conn *vici, const char *name, ...)
debugf(NHRP_DEBUG_VICI,
"%s: failure connecting VICI socket: %s", __func__,
strerror(errno));
- thread_add_timer(master, vici_reconnect, vici, 2,
- &vici->t_reconnect);
+ event_add_timer(master, vici_reconnect, vici, 2,
+ &vici->t_reconnect);
return;
}
debugf(NHRP_DEBUG_COMMON, "VICI: Connected");
vici->fd = fd;
- thread_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
+ event_add_read(master, vici_read, vici, vici->fd, &vici->t_read);
/* Send event subscribtions */
// vici_register_event(vici, "child-updown");
vici->fd = -1;
zbuf_init(&vici->ibuf, vici->ibuf_data, sizeof(vici->ibuf_data), 0);
zbufq_init(&vici->obuf);
- thread_add_timer_msec(master, vici_reconnect, vici, 10,
- &vici->t_reconnect);
+ event_add_timer_msec(master, vici_reconnect, vici, 10,
+ &vici->t_reconnect);
}
void vici_terminate(void)
if (IS_OSPF6_DEBUG_ASBR)
zlog_debug("%s: trigger redistribute reset thread", __func__);
- thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, ospf6,
- OSPF_MIN_LS_INTERVAL,
- &ospf6->t_distribute_update);
+ event_add_timer_msec(master, ospf6_asbr_routemap_update_timer, ospf6,
+ OSPF_MIN_LS_INTERVAL, &ospf6->t_distribute_update);
}
void ospf6_asbr_routemap_update(const char *mapname)
zlog_debug("%s: LSA found, refresh it",
__func__);
THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
- &lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
return;
}
}
if (lsa) {
THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa, 0,
- &lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa, 0,
+ &lsa->refresh);
} else {
if (IS_OSPF6_DEBUG_AGGR)
zlog_debug("%s: Originate external route(%pFX)",
__func__, ospf6->aggr_delay_interval);
ospf6->aggr_action = operation;
- thread_add_timer(master,
- ospf6_asbr_summary_process,
- ospf6, ospf6->aggr_delay_interval,
- &ospf6->t_external_aggr);
+ event_add_timer(master, ospf6_asbr_summary_process, ospf6,
+ ospf6->aggr_delay_interval, &ospf6->t_external_aggr);
}
int ospf6_asbr_external_rt_advertise(struct ospf6 *ospf6,
if (bss->state == BFD_STATUS_DOWN
&& bss->previous_state == BFD_STATUS_UP) {
THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
}
ospf6_lsdb_add(ospf6_lsa_copy(lsa), lsdb_self);
THREAD_OFF(lsa->refresh);
- thread_add_timer(master, ospf6_lsa_refresh, lsa, OSPF_LS_REFRESH_TIME,
- &lsa->refresh);
+ event_add_timer(master, ospf6_lsa_refresh, lsa, OSPF_LS_REFRESH_TIME,
+ &lsa->refresh);
if (IS_OSPF6_DEBUG_LSA_TYPE(lsa->header->type)
|| IS_OSPF6_DEBUG_ORIGINATE_TYPE(lsa->header->type)) {
monotime(&now);
if (!OSPF6_LSA_IS_MAXAGE(lsa)) {
- thread_add_timer(master, ospf6_lsa_expire, lsa,
- OSPF_LSA_MAXAGE + lsa->birth.tv_sec
- - now.tv_sec,
- &lsa->expire);
+ event_add_timer(master, ospf6_lsa_expire, lsa,
+ OSPF_LSA_MAXAGE + lsa->birth.tv_sec -
+ now.tv_sec,
+ &lsa->expire);
} else
lsa->expire = NULL;
ospf6_lsdb_add(ospf6_lsa_copy(lsa),
on->retrans_list);
- thread_add_timer(
- master, ospf6_lsupdate_send_neighbor,
- on, on->ospf6_if->rxmt_interval,
- &on->thread_send_lsupdate);
+ event_add_timer(master,
+ ospf6_lsupdate_send_neighbor,
+ on, on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsupdate);
retrans_added++;
}
}
if ((oi->type == OSPF_IFTYPE_BROADCAST)
|| (oi->type == OSPF_IFTYPE_POINTOPOINT)) {
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsupdate_list);
- thread_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
- &oi->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
+ &oi->thread_send_lsupdate);
} else {
/* reschedule retransmissions to all neighbors */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
THREAD_OFF(on->thread_send_lsupdate);
- thread_add_event(master, ospf6_lsupdate_send_neighbor,
- on, 0, &on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor,
+ on, 0, &on->thread_send_lsupdate);
}
}
}
"Delayed acknowledgement (BDR & MoreRecent & from DR)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi,
- 3, &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi,
+ 3, &oi->thread_send_lsack);
} else {
if (is_debug)
zlog_debug(
"Delayed acknowledgement (BDR & Duplicate & ImpliedAck & from DR)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi,
- 3, &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi,
+ 3, &oi->thread_send_lsack);
} else {
if (is_debug)
zlog_debug(
if (is_debug)
zlog_debug("Direct acknowledgement (BDR & Duplicate)");
ospf6_lsdb_add(ospf6_lsa_copy(lsa), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
return;
}
"Delayed acknowledgement (AllOther & MoreRecent)");
/* Delayed acknowledgement */
ospf6_lsdb_add(ospf6_lsa_copy(lsa), oi->lsack_list);
- thread_add_timer(master, ospf6_lsack_send_interface, oi, 3,
- &oi->thread_send_lsack);
+ event_add_timer(master, ospf6_lsack_send_interface, oi, 3,
+ &oi->thread_send_lsack);
return;
}
zlog_debug(
"Direct acknowledgement (AllOther & Duplicate)");
ospf6_lsdb_add(ospf6_lsa_copy(lsa), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
return;
}
/* a) Acknowledge back to neighbor (Direct acknowledgement,
* 13.5) */
ospf6_lsdb_add(ospf6_lsa_copy(new), from->lsack_list);
- thread_add_event(master, ospf6_lsack_send_neighbor, from, 0,
- &from->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, from, 0,
+ &from->thread_send_lsack);
/* b) Discard */
ospf6_lsa_delete(new);
"Newer instance of the self-originated LSA");
zlog_debug("Schedule reorigination");
}
- thread_add_event(master, ospf6_lsa_refresh, new, 0,
- &new->refresh);
+ event_add_event(master, ospf6_lsa_refresh, new, 0,
+ &new->refresh);
}
/* GR: check for network topology change. */
new->name);
/* BadLSReq */
- thread_add_event(master, bad_lsreq, from, 0, NULL);
+ event_add_event(master, bad_lsreq, from, 0, NULL);
ospf6_lsa_delete(new);
return;
ospf6_lsdb_add(ospf6_lsa_copy(old),
from->lsupdate_list);
- thread_add_event(master, ospf6_lsupdate_send_neighbor,
- from, 0, &from->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor,
+ from, 0, &from->thread_send_lsupdate);
ospf6_lsa_delete(new);
return;
zlog_debug(
"GR: remaining time until grace period expires: %lu(s)",
remaining_time);
- thread_add_timer(master, ospf6_gr_grace_period_expired,
- ospf6, remaining_time,
- &ospf6->gr_info.t_grace_period);
+ event_add_timer(master, ospf6_gr_grace_period_expired,
+ ospf6, remaining_time,
+ &ospf6->gr_info.t_grace_period);
}
}
actual_grace_interval);
/* Start the grace timer */
- thread_add_timer(master, ospf6_handle_grace_timer_expiry, restarter,
- actual_grace_interval,
- &restarter->gr_helper_info.t_grace_timer);
+ event_add_timer(master, ospf6_handle_grace_timer_expiry, restarter,
+ actual_grace_interval,
+ &restarter->gr_helper_info.t_grace_timer);
return OSPF6_GR_ACTIVE_HELPER;
}
if (on->state < OSPF6_NEIGHBOR_TWOWAY)
continue;
/* Schedule AdjOK. */
- thread_add_event(master, adj_ok, on, 0,
- &on->thread_adj_ok);
+ event_add_event(master, adj_ok, on, 0,
+ &on->thread_adj_ok);
}
}
zlog_info(
"Interface %s is still in all routers group, rescheduling for SSO",
oi->interface->name);
- thread_add_timer(master, interface_up, oi,
- OSPF6_INTERFACE_SSO_RETRY_INT,
- &oi->thread_sso);
+ event_add_timer(master, interface_up, oi,
+ OSPF6_INTERFACE_SSO_RETRY_INT, &oi->thread_sso);
return;
}
#endif /* __FreeBSD__ */
zlog_info(
"Scheduling %s for sso retry, trial count: %d",
oi->interface->name, oi->sso_try_cnt);
- thread_add_timer(master, interface_up, oi,
- OSPF6_INTERFACE_SSO_RETRY_INT,
- &oi->thread_sso);
+ event_add_timer(master, interface_up, oi,
+ OSPF6_INTERFACE_SSO_RETRY_INT,
+ &oi->thread_sso);
}
return;
}
/* Schedule Hello */
if (!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE)
&& !if_is_loopback(oi->interface)) {
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
}
/* decide next interface state */
ospf6_interface_state_change(OSPF6_INTERFACE_DROTHER, oi);
else {
ospf6_interface_state_change(OSPF6_INTERFACE_WAITING, oi);
- thread_add_timer(master, wait_timer, oi, oi->dead_interval,
- &oi->thread_wait_timer);
+ event_add_timer(master, wait_timer, oi, oi->dead_interval,
+ &oi->thread_wait_timer);
}
}
/* re-establish adjacencies */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
/* re-establish adjacencies */
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
if (thread_is_scheduled(oi->thread_send_hello)) {
THREAD_OFF(oi->thread_send_hello);
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
}
return CMD_SUCCESS;
}
for (ALL_LIST_ELEMENTS(oi->neighbor_list, node, nnode, on)) {
THREAD_OFF(on->inactivity_timer);
- thread_add_event(master, inactivity_timer, on, 0, NULL);
+ event_add_event(master, inactivity_timer, on, 0, NULL);
}
return CMD_SUCCESS;
/* don't send hellos over loopback interface */
if (!if_is_loopback(oi->interface))
- thread_add_timer(master, ospf6_hello_send, oi, 0,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
return CMD_SUCCESS;
}
#define OSPF6_ROUTER_LSA_SCHEDULE(oa) \
do { \
if (CHECK_FLAG((oa)->flag, OSPF6_AREA_ENABLE)) \
- thread_add_event(master, ospf6_router_lsa_originate, \
- oa, 0, &(oa)->thread_router_lsa); \
+ event_add_event(master, ospf6_router_lsa_originate, \
+ oa, 0, &(oa)->thread_router_lsa); \
} while (0)
#define OSPF6_NETWORK_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_network_lsa_originate, \
- oi, 0, &(oi)->thread_network_lsa); \
+ event_add_event(master, ospf6_network_lsa_originate, \
+ oi, 0, &(oi)->thread_network_lsa); \
} while (0)
#define OSPF6_LINK_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_link_lsa_originate, oi, \
- 0, &(oi)->thread_link_lsa); \
+ event_add_event(master, ospf6_link_lsa_originate, oi, \
+ 0, &(oi)->thread_link_lsa); \
} while (0)
#define OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(oa) \
do { \
if (CHECK_FLAG((oa)->flag, OSPF6_AREA_ENABLE)) \
- thread_add_event( \
+ event_add_event( \
master, ospf6_intra_prefix_lsa_originate_stub, \
oa, 0, &(oa)->thread_intra_prefix_lsa); \
} while (0)
#define OSPF6_INTRA_PREFIX_LSA_SCHEDULE_TRANSIT(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event( \
+ event_add_event( \
master, \
ospf6_intra_prefix_lsa_originate_transit, oi, \
0, &(oi)->thread_intra_prefix_lsa); \
#define OSPF6_AS_EXTERN_LSA_SCHEDULE(oi) \
do { \
if (!CHECK_FLAG((oi)->flag, OSPF6_INTERFACE_DISABLE)) \
- thread_add_event(master, ospf6_orig_as_external_lsa, \
- oi, 0, &(oi)->thread_as_extern_lsa); \
+ event_add_event(master, ospf6_orig_as_external_lsa, \
+ oi, 0, &(oi)->thread_as_extern_lsa); \
} while (0)
#define OSPF6_ROUTER_LSA_EXECUTE(oa) \
new = ospf6_lsa_create(self->header);
new->lsdb = old->lsdb;
- thread_add_timer(master, ospf6_lsa_refresh, new, OSPF_LS_REFRESH_TIME,
- &new->refresh);
+ event_add_timer(master, ospf6_lsa_refresh, new, OSPF_LS_REFRESH_TIME,
+ &new->refresh);
/* store it in the LSDB for self-originated LSAs */
ospf6_lsdb_add(ospf6_lsa_copy(new), lsdb_self);
/* Schedule interface events */
if (backupseen)
- thread_add_event(master, backup_seen, oi, 0, NULL);
+ event_add_event(master, backup_seen, oi, 0, NULL);
if (neighborchange)
- thread_add_event(master, neighbor_change, oi, 0, NULL);
+ event_add_event(master, neighbor_change, oi, 0, NULL);
if (neighbor_ifindex_change && on->state == OSPF6_NEIGHBOR_FULL)
OSPF6_ROUTER_LSA_SCHEDULE(oi->area);
zlog_warn(
"DbDesc recv: Master/Slave bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
if (CHECK_FLAG(dbdesc->bits, OSPF6_DBDESC_IBIT)) {
zlog_warn("DbDesc recv: Initialize bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
if (memcmp(on->options, dbdesc->options, sizeof(on->options))) {
zlog_warn("DbDesc recv: Option field mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
"DbDesc recv: Sequence number mismatch Nbr %s (received %#lx, %#lx expected)",
on->name, (unsigned long)ntohl(dbdesc->seqnum),
(unsigned long)on->dbdesc_seqnum);
- thread_add_event(master, seqnumber_mismatch, on, 0,
+ event_add_event(master, seqnumber_mismatch, on, 0,
NULL);
return;
}
zlog_warn(
"DbDesc recv: Not duplicate dbdesc in state %s Nbr %s",
ospf6_neighbor_state_str[on->state], on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0, NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0, NULL);
return;
default:
zlog_debug(
"SeqNumMismatch (E-bit mismatch), discard");
ospf6_lsa_delete(his);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
/* schedule send lsreq */
if (on->request_list->count)
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
THREAD_OFF(on->thread_send_dbdesc);
/* More bit check */
if (!CHECK_FLAG(dbdesc->bits, OSPF6_DBDESC_MBIT)
&& !CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT))
- thread_add_event(master, exchange_done, on, 0,
- &on->thread_exchange_done);
+ event_add_event(master, exchange_done, on, 0,
+ &on->thread_exchange_done);
else {
- thread_add_event(master, ospf6_dbdesc_send_newone, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send_newone, on, 0,
+ &on->thread_send_dbdesc);
}
/* save last received dbdesc */
zlog_debug(
"Duplicated dbdesc causes retransmit");
THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
return;
}
zlog_warn(
"DbDesc slave recv: Master/Slave bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
zlog_warn(
"DbDesc slave recv: Initialize bit mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
zlog_warn(
"DbDesc slave recv: Option field mismatch Nbr %s",
on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
"DbDesc slave recv: Sequence number mismatch Nbr %s (received %#lx, %#lx expected)",
on->name, (unsigned long)ntohl(dbdesc->seqnum),
(unsigned long)on->dbdesc_seqnum + 1);
- thread_add_event(master, seqnumber_mismatch, on, 0,
+ event_add_event(master, seqnumber_mismatch, on, 0,
NULL);
return;
}
zlog_debug(
"Duplicated dbdesc causes retransmit");
THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
return;
}
zlog_warn(
"DbDesc slave recv: Not duplicate dbdesc in state %s Nbr %s",
ospf6_neighbor_state_str[on->state], on->name);
- thread_add_event(master, seqnumber_mismatch, on, 0, NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0, NULL);
return;
default:
if (IS_OSPF6_DEBUG_MESSAGE(oh->type, RECV))
zlog_debug("E-bit mismatch with LSA Headers");
ospf6_lsa_delete(his);
- thread_add_event(master, seqnumber_mismatch, on, 0,
- NULL);
+ event_add_event(master, seqnumber_mismatch, on, 0,
+ NULL);
return;
}
/* schedule send lsreq */
if (on->request_list->count)
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send_newone, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send_newone, on, 0,
+ &on->thread_send_dbdesc);
/* save last received dbdesc */
memcpy(&on->dbdesc_last, dbdesc, sizeof(struct ospf6_dbdesc));
"Can't find requested lsa [%s Id:%pI4 Adv:%pI4] send badLSReq",
ospf6_lstype_name(e->type), &e->id,
&e->adv_router);
- thread_add_event(master, bad_lsreq, on, 0, NULL);
+ event_add_event(master, bad_lsreq, on, 0, NULL);
return;
}
/* schedule send lsupdate */
THREAD_OFF(on->thread_send_lsupdate);
- thread_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
- &on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
+ &on->thread_send_lsupdate);
}
/* Verify, that the specified memory area contains exactly N valid IPv6
ospf6 = THREAD_ARG(thread);
sockfd = THREAD_FD(thread);
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
while (count < ospf6->write_oi_count) {
count++;
/* If packets still remain in queue, call write thread. */
if (!list_isempty(ospf6->oi_write_q))
- thread_add_write(master, ospf6_write, ospf6, ospf6->fd,
- &ospf6->t_write);
+ event_add_write(master, ospf6_write, ospf6, ospf6->fd,
+ &ospf6->t_write);
}
void ospf6_hello_send(struct event *thread)
ospf6_packet_add_top(oi, op);
/* set next thread */
- thread_add_timer(master, ospf6_hello_send, oi, oi->hello_interval,
- &oi->thread_send_hello);
+ event_add_timer(master, ospf6_hello_send, oi, oi->hello_interval,
+ &oi->thread_send_hello);
OSPF6_MESSAGE_WRITE_ON(oi);
}
/* set next thread if master */
if (CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT))
- thread_add_timer(master, ospf6_dbdesc_send, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_dbdesc);
+ event_add_timer(master, ospf6_dbdesc_send, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_dbdesc);
op = ospf6_packet_new(on->ospf6_if->ifmtu);
ospf6_make_header(OSPF6_MESSAGE_TYPE_DBDESC, on->ospf6_if, op->s);
if (!CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT) && /* Slave */
!CHECK_FLAG(on->dbdesc_last.bits, OSPF6_DBDESC_MBIT)
&& !CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MBIT))
- thread_add_event(master, exchange_done, on, 0,
- &on->thread_exchange_done);
+ event_add_event(master, exchange_done, on, 0,
+ &on->thread_exchange_done);
thread_execute(master, ospf6_dbdesc_send, on, 0);
}
/* schedule loading_done if request list is empty */
if (on->request_list->count == 0) {
- thread_add_event(master, loading_done, on, 0, NULL);
+ event_add_event(master, loading_done, on, 0, NULL);
return;
}
/* set next thread */
if (on->request_list->count != 0) {
- thread_add_timer(master, ospf6_lsreq_send, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_lsreq);
+ event_add_timer(master, ospf6_lsreq_send, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsreq);
}
}
ospf6_packet_free(op);
if (on->lsupdate_list->count != 0) {
- thread_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
- &on->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_neighbor, on, 0,
+ &on->thread_send_lsupdate);
} else if (on->retrans_list->count != 0) {
- thread_add_timer(master, ospf6_lsupdate_send_neighbor, on,
- on->ospf6_if->rxmt_interval,
- &on->thread_send_lsupdate);
+ event_add_timer(master, ospf6_lsupdate_send_neighbor, on,
+ on->ospf6_if->rxmt_interval,
+ &on->thread_send_lsupdate);
}
}
ospf6_packet_free(op);
if (oi->lsupdate_list->count > 0) {
- thread_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
- &oi->thread_send_lsupdate);
+ event_add_event(master, ospf6_lsupdate_send_interface, oi, 0,
+ &oi->thread_send_lsupdate);
}
}
OSPF6_MESSAGE_WRITE_ON(on->ospf6_if);
if (on->lsack_list->count > 0)
- thread_add_event(master, ospf6_lsack_send_neighbor, on, 0,
- &on->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_neighbor, on, 0,
+ &on->thread_send_lsack);
}
static uint16_t ospf6_make_lsack_interface(struct ospf6_interface *oi,
/* if we run out of packet size/space here,
better to try again soon. */
THREAD_OFF(oi->thread_send_lsack);
- thread_add_event(master, ospf6_lsack_send_interface, oi,
- 0, &oi->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_interface, oi,
+ 0, &oi->thread_send_lsack);
ospf6_lsa_unlock(lsa);
if (lsanext)
OSPF6_MESSAGE_WRITE_ON(oi);
if (oi->lsack_list->count > 0)
- thread_add_event(master, ospf6_lsack_send_interface, oi, 0,
- &oi->thread_send_lsack);
+ event_add_event(master, ospf6_lsack_send_interface, oi, 0,
+ &oi->thread_send_lsack);
}
/* Commands */
/* reset Inactivity Timer */
THREAD_OFF(on->inactivity_timer);
- thread_add_timer(master, inactivity_timer, on,
- on->ospf6_if->dead_interval, &on->inactivity_timer);
+ event_add_timer(master, inactivity_timer, on,
+ on->ospf6_if->dead_interval, &on->inactivity_timer);
if (on->state <= OSPF6_NEIGHBOR_DOWN)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_INIT, on,
if (IS_OSPF6_DEBUG_NEIGHBOR(EVENT))
zlog_debug("Neighbor Event %s: *2Way-Received*", on->name);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
if (!need_adjacency(on)) {
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_TWOWAY, on,
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_IBIT);
THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
void negotiation_done(struct event *thread)
/* RFC 2328 (10.8): Release the last dbdesc after dead_interval */
if (!CHECK_FLAG(on->dbdesc_bits, OSPF6_DBDESC_MSBIT)) {
THREAD_OFF(on->last_dbdesc_release_timer);
- thread_add_timer(master, ospf6_neighbor_last_dbdesc_release, on,
- on->ospf6_if->dead_interval,
- &on->last_dbdesc_release_timer);
+ event_add_timer(master, ospf6_neighbor_last_dbdesc_release, on,
+ on->ospf6_if->dead_interval,
+ &on->last_dbdesc_release_timer);
}
if (on->request_list->count == 0)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_LOADING, on,
OSPF6_NEIGHBOR_EVENT_EXCHANGE_DONE);
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
}
}
if ((on->state == OSPF6_NEIGHBOR_LOADING)
|| (on->state == OSPF6_NEIGHBOR_EXCHANGE)) {
if (on->request_list->count == 0)
- thread_add_event(master, loading_done, on, 0, NULL);
+ event_add_event(master, loading_done, on, 0, NULL);
else if (on->last_ls_req == NULL) {
THREAD_OFF(on->thread_send_lsreq);
- thread_add_event(master, ospf6_lsreq_send, on, 0,
- &on->thread_send_lsreq);
+ event_add_event(master, ospf6_lsreq_send, on, 0,
+ &on->thread_send_lsreq);
}
}
}
SET_FLAG(on->dbdesc_bits, OSPF6_DBDESC_IBIT);
THREAD_OFF(on->thread_send_dbdesc);
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
} else if (on->state >= OSPF6_NEIGHBOR_EXSTART && !need_adjacency(on)) {
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_TWOWAY, on,
THREAD_OFF(on->thread_send_dbdesc);
on->dbdesc_seqnum++; /* Incr seqnum as per RFC2328, sec 10.3 */
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
void bad_lsreq(struct event *thread)
THREAD_OFF(on->thread_send_dbdesc);
on->dbdesc_seqnum++; /* Incr seqnum as per RFC2328, sec 10.3 */
- thread_add_event(master, ospf6_dbdesc_send, on, 0,
- &on->thread_send_dbdesc);
-
+ event_add_event(master, ospf6_dbdesc_send, on, 0,
+ &on->thread_send_dbdesc);
}
void oneway_received(struct event *thread)
ospf6_neighbor_state_change(OSPF6_NEIGHBOR_INIT, on,
OSPF6_NEIGHBOR_EVENT_ONEWAY_RCVD);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
ospf6_neighbor_clear_ls_lists(on);
ospf6_neighbor_state_change(
OSPF6_NEIGHBOR_DOWN, on,
OSPF6_NEIGHBOR_EVENT_INACTIVITY_TIMER);
- thread_add_event(master, neighbor_change, on->ospf6_if, 0,
- NULL);
+ event_add_event(master, neighbor_change, on->ospf6_if, 0, NULL);
listnode_delete(on->ospf6_if->neighbor_list, on);
ospf6_neighbor_delete(on);
"%s, Acting as HELPER for this neighbour, So restart the dead timer.",
__PRETTY_FUNCTION__);
- thread_add_timer(master, inactivity_timer, on,
- on->ospf6_if->dead_interval,
- &on->inactivity_timer);
+ event_add_timer(master, inactivity_timer, on,
+ on->ospf6_if->dead_interval,
+ &on->inactivity_timer);
}
}
listnode_add(oi->area->ospf6->oi_write_q, (oi)); \
(oi)->on_write_q = 1; \
} \
- if (list_was_empty \
- && !list_isempty(oi->area->ospf6->oi_write_q)) \
- thread_add_write(master, ospf6_write, oi->area->ospf6, \
- oi->area->ospf6->fd, \
- &oi->area->ospf6->t_write); \
+ if (list_was_empty && \
+ !list_isempty(oi->area->ospf6->oi_write_q)) \
+ event_add_write(master, ospf6_write, oi->area->ospf6, \
+ oi->area->ospf6->fd, \
+ &oi->area->ospf6->t_write); \
} while (0)
#endif /* OSPF6_NETWORK_H */
if (IS_OSPF6_DEBUG_ABR)
zlog_debug("Scheduling ABR task");
- thread_add_timer(master, ospf6_abr_task_timer, ospf6,
- OSPF6_ABR_TASK_DELAY, &ospf6->t_abr_task);
+ event_add_timer(master, ospf6_abr_task_timer, ospf6,
+ OSPF6_ABR_TASK_DELAY, &ospf6->t_abr_task);
}
/* Flush the NSSA LSAs from the area */
o->lsdb);
if (old) {
THREAD_OFF(old->refresh);
- thread_add_event(master, ospf6_lsa_refresh, old, 0,
- &old->refresh);
+ event_add_event(master, ospf6_lsa_refresh, old, 0,
+ &old->refresh);
} else {
ospf6_as_external_lsa_originate(route, o);
}
if (IS_OSPF6_DEBUG_NSSA)
ospf6_lsa_header_print(lsa);
THREAD_OFF(lsa->refresh);
- thread_add_event(master, ospf6_lsa_refresh, lsa,
- 0, &lsa->refresh);
+ event_add_event(master, ospf6_lsa_refresh, lsa,
+ 0, &lsa->refresh);
}
}
}
zlog_debug("SPF: Rescheduling in %ld msec", delay);
THREAD_OFF(ospf6->t_spf_calc);
- thread_add_timer_msec(master, ospf6_spf_calculation_thread, ospf6,
- delay, &ospf6->t_spf_calc);
+ event_add_timer_msec(master, ospf6_spf_calculation_thread, ospf6, delay,
+ &ospf6->t_spf_calc);
}
void ospf6_spf_display_subtree(struct vty *vty, const char *prefix, int rest,
if (ospf6 == NULL)
return;
- thread_add_timer(master, ospf6_ase_calculate_timer, ospf6,
- OSPF6_ASE_CALC_INTERVAL, &ospf6->t_ase_calc);
+ event_add_timer(master, ospf6_ase_calculate_timer, ospf6,
+ OSPF6_ASE_CALC_INTERVAL, &ospf6->t_ase_calc);
}
ret = ospf6_serv_sock(ospf6);
if (ret < 0 || ospf6->fd <= 0)
return 0;
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
ospf6_router_id_update(ospf6, true);
}
*/
ospf6_gr_nvm_read(ospf6);
- thread_add_read(master, ospf6_receive, ospf6, ospf6->fd,
- &ospf6->t_ospf6_receive);
+ event_add_read(master, ospf6_receive, ospf6, ospf6->fd,
+ &ospf6->t_ospf6_receive);
return ospf6;
}
void ospf6_maxage_remove(struct ospf6 *o)
{
if (o)
- thread_add_timer(master, ospf6_maxage_remover, o,
- OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT,
- &o->maxage_remover);
+ event_add_timer(master, ospf6_maxage_remover, o,
+ OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT,
+ &o->maxage_remover);
}
bool ospf6_router_id_update(struct ospf6 *ospf6, bool init)
}
/* Reschedule read thread */
- thread_add_read(master, lsa_read, oclient, fd, NULL);
+ event_add_read(master, lsa_read, oclient, fd, NULL);
}
/* ---------------------------------------------------------
lsa_type, opaque_type, &addr);
/* Schedule opaque LSA originate in 5 secs */
- thread_add_timer(master, lsa_inject, oclient, 5, NULL);
+ event_add_timer(master, lsa_inject, oclient, 5, NULL);
/* Schedule opaque LSA update with new value */
- thread_add_timer(master, lsa_inject, oclient, 10, NULL);
+ event_add_timer(master, lsa_inject, oclient, 10, NULL);
/* Schedule delete */
- thread_add_timer(master, lsa_delete, oclient, 30, NULL);
+ event_add_timer(master, lsa_delete, oclient, 30, NULL);
}
static void new_if_callback(struct in_addr ifaddr, struct in_addr area_id)
ospf_apiclient_sync_lsdb(oclient);
/* Schedule thread that handles asynchronous messages */
- thread_add_read(master, lsa_read, oclient, oclient->fd_async, NULL);
+ event_add_read(master, lsa_read, oclient, oclient->fd_async, NULL);
/* Now connection is established, run loop */
while (1) {
* giving time for route synchronization in
* all the routers.
*/
- thread_add_timer(
- master, ospf_abr_announce_non_dna_routers, ospf,
- OSPF_ABR_DNA_TIMER, &ospf->t_abr_fr);
+ event_add_timer(master,
+ ospf_abr_announce_non_dna_routers, ospf,
+ OSPF_ABR_DNA_TIMER, &ospf->t_abr_fr);
}
}
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("Scheduling ABR task");
- thread_add_timer(master, ospf_abr_task_timer, ospf, OSPF_ABR_TASK_DELAY,
- &ospf->t_abr_task);
+ event_add_timer(master, ospf_abr_task_timer, ospf, OSPF_ABR_TASK_DELAY,
+ &ospf->t_abr_task);
}
{
switch (event) {
case OSPF_APISERVER_ACCEPT:
- (void)thread_add_read(master, ospf_apiserver_accept, apiserv,
- fd, NULL);
+ (void)event_add_read(master, ospf_apiserver_accept, apiserv, fd,
+ NULL);
break;
case OSPF_APISERVER_SYNC_READ:
apiserv->t_sync_read = NULL;
- thread_add_read(master, ospf_apiserver_read, apiserv, fd,
- &apiserv->t_sync_read);
+ event_add_read(master, ospf_apiserver_read, apiserv, fd,
+ &apiserv->t_sync_read);
break;
#ifdef USE_ASYNC_READ
case OSPF_APISERVER_ASYNC_READ:
apiserv->t_async_read = NULL;
- thread_add_read(master, ospf_apiserver_read, apiserv, fd,
- &apiserv->t_async_read);
+ event_add_read(master, ospf_apiserver_read, apiserv, fd,
+ &apiserv->t_async_read);
break;
#endif /* USE_ASYNC_READ */
case OSPF_APISERVER_SYNC_WRITE:
- thread_add_write(master, ospf_apiserver_sync_write, apiserv, fd,
- &apiserv->t_sync_write);
+ event_add_write(master, ospf_apiserver_sync_write, apiserv, fd,
+ &apiserv->t_sync_write);
break;
case OSPF_APISERVER_ASYNC_WRITE:
- thread_add_write(master, ospf_apiserver_async_write, apiserv,
- fd, &apiserv->t_async_write);
+ event_add_write(master, ospf_apiserver_async_write, apiserv, fd,
+ &apiserv->t_async_write);
break;
}
}
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("Scheduling ASBR NSSA redistribution update");
- thread_add_timer(master, ospf_asbr_nssa_redist_update_timer, ospf,
- OSPF_ASBR_NSSA_REDIST_UPDATE_DELAY,
- &ospf->t_asbr_nssa_redist_update);
+ event_add_timer(master, ospf_asbr_nssa_redist_update_timer, ospf,
+ OSPF_ASBR_NSSA_REDIST_UPDATE_DELAY,
+ &ospf->t_asbr_nssa_redist_update);
}
void ospf_redistribute_withdraw(struct ospf *ospf, uint8_t type,
__func__, ospf->aggr_delay_interval);
ospf->aggr_action = operation;
- thread_add_timer(master, ospf_asbr_external_aggr_process, ospf,
- ospf->aggr_delay_interval, &ospf->t_external_aggr);
+ event_add_timer(master, ospf_asbr_external_aggr_process, ospf,
+ ospf->aggr_delay_interval, &ospf->t_external_aggr);
}
int ospf_asbr_external_aggregator_set(struct ospf *ospf, struct prefix_ipv4 *p,
if (ospf == NULL)
return;
- thread_add_timer(master, ospf_ase_calculate_timer, ospf,
- OSPF_ASE_CALC_INTERVAL, &ospf->t_ase_calc);
+ event_add_timer(master, ospf_ase_calculate_timer, ospf,
+ OSPF_ASE_CALC_INTERVAL, &ospf->t_ase_calc);
}
void ospf_ase_register_external_lsa(struct ospf_lsa *lsa, struct ospf *top)
zlog_debug(
"GR: remaining time until grace period expires: %lu(s)",
remaining_time);
- thread_add_timer(master, ospf_gr_grace_period_expired,
- ospf, remaining_time,
- &ospf->gr_info.t_grace_period);
+ event_add_timer(master, ospf_gr_grace_period_expired,
+ ospf, remaining_time,
+ &ospf->gr_info.t_grace_period);
}
}
actual_grace_interval);
/* Start the grace timer */
- thread_add_timer(master, ospf_handle_grace_timer_expiry, restarter,
- actual_grace_interval,
- &restarter->gr_helper_info.t_grace_timer);
+ event_add_timer(master, ospf_handle_grace_timer_expiry, restarter,
+ actual_grace_interval,
+ &restarter->gr_helper_info.t_grace_timer);
return OSPF_GR_ACTIVE_HELPER;
}
oi->on_write_q = 1; \
} \
if (!list_isempty((O)->oi_write_q)) \
- thread_add_write(master, ospf_write, (O), (O)->fd, \
- &(O)->t_write); \
+ event_add_write(master, ospf_write, (O), (O)->fd, \
+ &(O)->t_write); \
} while (0)
/* Macro for OSPF ISM timer turn on. */
-#define OSPF_ISM_TIMER_ON(T, F, V) thread_add_timer(master, (F), oi, (V), &(T))
+#define OSPF_ISM_TIMER_ON(T, F, V) event_add_timer(master, (F), oi, (V), &(T))
#define OSPF_ISM_TIMER_MSEC_ON(T, F, V) \
- thread_add_timer_msec(master, (F), oi, (V), &(T))
+ event_add_timer_msec(master, (F), oi, (V), &(T))
/* convenience macro to set hello timer correctly, according to
* whether fast-hello is set or not
/* Macro for OSPF schedule event. */
#define OSPF_ISM_EVENT_SCHEDULE(I, E) \
- thread_add_event(master, ospf_ism_event, (I), (E), NULL)
+ event_add_event(master, ospf_ism_event, (I), (E), NULL)
/* Macro for OSPF execute event. */
#define OSPF_ISM_EVENT_EXECUTE(I, E) \
ols_debug("%s: start holddown timer for %s time %d", __func__,
ifp->name, ldp_sync_info->holddown);
- thread_add_timer(master, ospf_ldp_sync_holddown_timer,
- ifp, ldp_sync_info->holddown,
- &ldp_sync_info->t_holddown);
+ event_add_timer(master, ospf_ldp_sync_holddown_timer, ifp,
+ ldp_sync_info->holddown, &ldp_sync_info->t_holddown);
}
/*
data->area = area;
data->lsa = ospf_lsa_lock(lsa); /* Message / Flood area */
- thread_add_event(master, ospf_lsa_action, data, 0, NULL);
+ event_add_event(master, ospf_lsa_action, data, 0, NULL);
}
void ospf_schedule_lsa_flush_area(struct ospf_area *area, struct ospf_lsa *lsa)
data->area = area;
data->lsa = ospf_lsa_lock(lsa); /* Message / Flush area */
- thread_add_event(master, ospf_lsa_action, data, 0, NULL);
+ event_add_event(master, ospf_lsa_action, data, 0, NULL);
}
}
ospf->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- ospf->lsa_refresh_interval, &ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf,
+ ospf->lsa_refresh_interval, &ospf->t_lsa_refresher);
ospf->lsa_refresher_started = monotime(NULL);
for (ALL_LIST_ELEMENTS(lsa_to_refresh, node, nnode, lsa)) {
#define OSPF_NSM_EVENT_MAX 14
/* Macro for OSPF NSM timer turn on. */
-#define OSPF_NSM_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr, (V), &(T))
+#define OSPF_NSM_TIMER_ON(T, F, V) event_add_timer(master, (F), nbr, (V), &(T))
/* Macro for OSPF NSM schedule event. */
#define OSPF_NSM_EVENT_SCHEDULE(N, E) \
- thread_add_event(master, ospf_nsm_event, (N), (E), NULL)
+ event_add_event(master, ospf_nsm_event, (N), (E), NULL)
/* Macro for OSPF NSM execute event. */
#define OSPF_NSM_EVENT_EXECUTE(N, E) \
"Schedule Type-9 Opaque-LSA origination in %d ms later.",
delay);
oi->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type9_lsa_originate,
- oi, delay, &oi->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type9_lsa_originate,
+ oi, delay, &oi->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
"Schedule Type-10 Opaque-LSA origination in %d ms later.",
delay);
area->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type10_lsa_originate,
- area, delay, &area->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type10_lsa_originate,
+ area, delay, &area->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
"Schedule Type-11 Opaque-LSA origination in %d ms later.",
delay);
top->t_opaque_lsa_self = NULL;
- thread_add_timer_msec(master, ospf_opaque_type11_lsa_originate,
- top, delay, &top->t_opaque_lsa_self);
+ event_add_timer_msec(master, ospf_opaque_type11_lsa_originate,
+ top, delay, &top->t_opaque_lsa_self);
delay += top->min_ls_interval;
}
* triggered by external interventions (vty session, signaling, etc).
*------------------------------------------------------------------------*/
-#define OSPF_OPAQUE_TIMER_ON(T,F,L,V) thread_add_timer_msec (master, (F), (L), (V), &(T))
+#define OSPF_OPAQUE_TIMER_ON(T, F, L, V) \
+ event_add_timer_msec(master, (F), (L), (V), &(T))
static struct ospf_lsa *pseudo_lsa(struct ospf_interface *oi,
struct ospf_area *area, uint8_t lsa_type,
void ospf_ls_req_event(struct ospf_neighbor *nbr)
{
THREAD_OFF(nbr->t_ls_req);
- thread_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
+ event_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
}
/* Cyclic timer function. Fist registered in ospf_nbr_new () in
/* If packets still remain in queue, call write thread. */
if (!list_isempty(ospf->oi_write_q))
- thread_add_write(master, ospf_write, ospf, ospf->fd,
- &ospf->t_write);
+ event_add_write(master, ospf_write, ospf, ospf->fd,
+ &ospf->t_write);
}
/* OSPF Hello message read -- RFC2328 Section 10.5. */
ospf = THREAD_ARG(thread);
/* prepare for next packet. */
- thread_add_read(master, ospf_read, ospf, ospf->fd, &ospf->t_read);
+ event_add_read(master, ospf_read, ospf, ospf->fd, &ospf->t_read);
while (count < ospf->write_oi_count) {
count++;
"%s: update lists not cleared, %d nodes to try again, raising new event",
__func__, again);
oi->t_ls_upd_event = NULL;
- thread_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
- &oi->t_ls_upd_event);
+ event_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
+ &oi->t_ls_upd_event);
}
if (IS_DEBUG_OSPF_EVENT)
rn->p.u.prefix4, 1);
}
} else
- thread_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
- &oi->t_ls_upd_event);
+ event_add_event(master, ospf_ls_upd_send_queue_event, oi, 0,
+ &oi->t_ls_upd_event);
}
static void ospf_ls_ack_send_list(struct ospf_interface *oi, struct list *ack,
listnode_add(oi->ls_ack_direct.ls_ack, ospf_lsa_lock(lsa));
- thread_add_event(master, ospf_ls_ack_send_event, oi, 0,
- &oi->t_ls_ack_direct);
+ event_add_event(master, ospf_ls_ack_send_event, oi, 0,
+ &oi->t_ls_ack_direct);
}
/* Send Link State Acknowledgment delayed. */
zlog_debug("SPF: calculation timer delay = %ld msec", delay);
ospf->t_spf_calc = NULL;
- thread_add_timer_msec(master, ospf_spf_calculate_schedule_worker, ospf,
- delay, &ospf->t_spf_calc);
+ event_add_timer_msec(master, ospf_spf_calculate_schedule_worker, ospf,
+ delay, &ospf->t_spf_calc);
}
/* Restart OSPF SPF algorithm*/
if (!ospf_zebra_label_manager_ready())
if (ospf_zebra_label_manager_connect() < 0) {
/* Re-attempt to connect to Label Manager in 1 sec. */
- thread_add_timer(master, sr_start_label_manager, ospf,
- 1, &OspfSR.t_start_lm);
+ event_add_timer(master, sr_start_label_manager, ospf, 1,
+ &OspfSR.t_start_lm);
osr_debug(" |- Failed to start the Label Manager");
return -1;
}
/*
* Check if default needs to be flushed too.
*/
- thread_add_event(master, ospf_external_lsa_default_routemap_timer, ospf,
- 0, &ospf->t_default_routemap_timer);
+ event_add_event(master, ospf_external_lsa_default_routemap_timer, ospf,
+ 0, &ospf->t_default_routemap_timer);
}
/* Update NHLFE for Prefix SID */
* there are any other external info which can still trigger
* default route origination else flush it.
*/
- thread_add_event(master,
- ospf_external_lsa_default_routemap_timer, ospf,
- 0, &ospf->t_default_routemap_timer);
+ event_add_event(master,
+ ospf_external_lsa_default_routemap_timer, ospf,
+ 0, &ospf->t_default_routemap_timer);
}
return true;
return;
/* Set timer. If timer is already started, this call does nothing. */
- thread_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
- ospf->min_ls_interval,
- &ospf->t_distribute_update);
+ event_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
+ ospf->min_ls_interval, &ospf->t_distribute_update);
}
/* If access-list is updated, apply some check. */
new->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT;
new->maxage_lsa = route_table_init();
new->t_maxage_walker = NULL;
- thread_add_timer(master, ospf_lsa_maxage_walker, new,
- OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
+ event_add_timer(master, ospf_lsa_maxage_walker, new,
+ OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker);
/* Max paths initialization */
new->max_multipath = MULTIPATH_NUM;
new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
new->lsa_refresh_timer = OSPF_LS_REFRESH_TIME;
new->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, new,
- new->lsa_refresh_interval, &new->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, new,
+ new->lsa_refresh_interval, &new->t_lsa_refresher);
new->lsa_refresher_started = monotime(NULL);
new->ibuf = stream_new(OSPF_MAX_PACKET_SIZE + 1);
return new;
}
- thread_add_read(master, ospf_read, new, new->fd, &new->t_read);
+ event_add_read(master, ospf_read, new, new->fd, &new->t_read);
new->oi_running = 1;
ospf_router_id_update(new);
if (time_left > interval) {
THREAD_OFF(ospf->t_lsa_refresher);
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- interval, &ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf, interval,
+ &ospf->t_lsa_refresher);
}
ospf->lsa_refresh_interval = interval;
if (time_left > OSPF_LSA_REFRESH_INTERVAL_DEFAULT) {
THREAD_OFF(ospf->t_lsa_refresher);
ospf->t_lsa_refresher = NULL;
- thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
- OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
- &ospf->t_lsa_refresher);
+ event_add_timer(master, ospf_lsa_refresh_walker, ospf,
+ OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
+ &ospf->t_lsa_refresher);
}
ospf->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT;
ret = ospf_sock_init(ospf);
if (ret < 0 || ospf->fd <= 0)
return 0;
- thread_add_read(master, ospf_read, ospf, ospf->fd,
- &ospf->t_read);
+ event_add_read(master, ospf_read, ospf, ospf->fd,
+ &ospf->t_read);
ospf->oi_running = 1;
ospf_router_id_update(ospf);
}
#define LSA_OPTIONS_NSSA_GET(area) \
(((area)->external_routing == OSPF_AREA_NSSA) ? OSPF_OPTION_NP : 0)
-#define OSPF_TIMER_ON(T,F,V) thread_add_timer (master,(F),ospf,(V),&(T))
-#define OSPF_AREA_TIMER_ON(T,F,V) thread_add_timer (master, (F), area, (V), &(T))
-#define OSPF_POLL_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr_nbma, (V), &(T))
+#define OSPF_TIMER_ON(T, F, V) event_add_timer(master, (F), ospf, (V), &(T))
+#define OSPF_AREA_TIMER_ON(T, F, V) \
+ event_add_timer(master, (F), area, (V), &(T))
+#define OSPF_POLL_TIMER_ON(T, F, V) \
+ event_add_timer(master, (F), nbr_nbma, (V), &(T))
/* Extern variables. */
extern struct ospf_master *om;
int pcep_ctrl_halt_cb(struct frr_pthread *fpt, void **res)
{
- thread_add_event(fpt->master, pcep_thread_finish_event_handler,
- (void *)fpt, 0, NULL);
+ event_add_event(fpt->master, pcep_thread_finish_event_handler,
+ (void *)fpt, 0, NULL);
pthread_join(fpt->thread, res);
return 0;
data->continue_lsp_update_handler = cb;
data->payload = payload;
- thread_add_event(ctrl_state->main, pcep_refine_path_event_cb,
- (void *)data, 0, NULL);
+ event_add_event(ctrl_state->main, pcep_refine_path_event_cb,
+ (void *)data, 0, NULL);
return 0;
}
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_timer(ctrl_state->self, timer_cb, (void *)data, delay,
- thread);
+ event_add_timer(ctrl_state->self, timer_cb, (void *)data, delay,
+ thread);
return 0;
}
data->payload = payload;
if (is_read) {
- thread_add_read(ctrl_state->self, socket_cb, (void *)data, fd,
- thread);
+ event_add_read(ctrl_state->self, socket_cb, (void *)data, fd,
+ thread);
} else {
- thread_add_write(ctrl_state->self, socket_cb, (void *)data, fd,
- thread);
+ event_add_write(ctrl_state->self, socket_cb, (void *)data, fd,
+ thread);
}
return 0;
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_event(ctrl_state->self, event_cb, (void *)data, 0, NULL);
+ event_add_event(ctrl_state->self, event_cb, (void *)data, 0, NULL);
return 0;
}
data->pcc_id = pcc_id;
data->payload = payload;
- thread_add_event(ctrl_state->main, pcep_main_event_handler,
- (void *)data, 0, NULL);
+ event_add_event(ctrl_state->main, pcep_main_event_handler, (void *)data,
+ 0, NULL);
return 0;
}
PATH_TED_DEBUG("%s: PATHD-TED: Opaque asked for TED sync ",
__func__);
}
- thread_add_timer(ted_state_g.main, path_ted_timer_handler_sync,
- &ted_state_g, ted_state_g.link_state_delay_interval,
- &ted_state_g.t_link_state_sync);
+ event_add_timer(ted_state_g.main, path_ted_timer_handler_sync,
+ &ted_state_g, ted_state_g.link_state_delay_interval,
+ &ted_state_g.t_link_state_sync);
return status;
}
int status = 0;
path_ted_timer_refresh_cancel();
- thread_add_timer(ted_state_g.main, path_ted_timer_handler_refresh,
- &ted_state_g,
- ted_state_g.segment_list_refresh_interval,
- &ted_state_g.t_segment_list_refresh);
+ event_add_timer(ted_state_g.main, path_ted_timer_handler_refresh,
+ &ted_state_g, ted_state_g.segment_list_refresh_interval,
+ &ted_state_g.t_segment_list_refresh);
return status;
}
from changing the candidate by hand with the console */
if (candidate->hook_timer != NULL)
return;
- thread_add_timer(master, trigger_pathd_candidate_created_timer,
- (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
+ event_add_timer(master, trigger_pathd_candidate_created_timer,
+ (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
}
void trigger_pathd_candidate_created_timer(struct event *thread)
from changing the candidate by hand with the console */
if (candidate->hook_timer != NULL)
return;
- thread_add_timer(master, trigger_pathd_candidate_updated_timer,
- (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
+ event_add_timer(master, trigger_pathd_candidate_updated_timer,
+ (void *)candidate, HOOK_DELAY, &candidate->hook_timer);
}
void trigger_pathd_candidate_updated_timer(struct event *thread)
log_ifp("next general expiry in %" PRId64 "ms"),
remain_ms / 1000);
- thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
- &remain, &gm_ifp->t_expire);
+ event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+ &remain, &gm_ifp->t_expire);
return;
}
zlog_debug(
log_ifp("starting general timer @ 0: %pTVMu"),
&pend->expiry);
- thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
- &timers->expire_wait, &gm_ifp->t_expire);
+ event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+ &timers->expire_wait, &gm_ifp->t_expire);
} else if (PIM_DEBUG_GM_TRACE)
zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
gm_ifp->n_pending, &pend->expiry);
THREAD_OFF(sg->t_sg_expire);
}
- thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
- &sg->t_sg_expire);
+ event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
+ &sg->t_sg_expire);
}
static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
}
monotime(&pend->query);
- thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
- &timers->expire_wait, &pend->t_expire);
+ event_add_timer_tv(router->master, gm_t_grp_expire, pend,
+ &timers->expire_wait, &pend->t_expire);
if (PIM_DEBUG_GM_TRACE)
zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
THREAD_OFF(gm_ifp->t_other_querier);
other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
- thread_add_timer_msec(router->master, gm_t_other_querier,
- gm_ifp, other_ms,
- &gm_ifp->t_other_querier);
+ event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
+ other_ms, &gm_ifp->t_other_querier);
}
if (len == sizeof(struct mld_v1_pkt)) {
ssize_t nread;
size_t pktlen;
- thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
- &pim->t_gm_recv);
+ event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+ &pim->t_gm_recv);
iov->iov_base = rxbuf;
iov->iov_len = sizeof(rxbuf);
gm_ifp->n_startup--;
}
- thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
- &gm_ifp->t_query);
+ event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
+ &gm_ifp->t_query);
gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
}
sg->n_query--;
if (sg->n_query)
- thread_add_timer_msec(router->master, gm_t_sg_query, sg,
- gm_ifp->cur_query_intv_trig,
- &sg->t_sg_query);
+ event_add_timer_msec(router->master, gm_t_sg_query, sg,
+ gm_ifp->cur_query_intv_trig,
+ &sg->t_sg_query);
if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
return;
pend_gsq->iface = gm_ifp;
gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
- thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
- &gm_ifp->cfg_timing_fuzz,
- &pend_gsq->t_send);
+ event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
+ &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
}
assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
vrf->name);
}
- thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
- &pim->t_gm_recv);
+ event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+ &pim->t_gm_recv);
}
static void gm_vrf_socket_decref(struct pim_instance *pim)
__func__, ch->sg_str, interval, ch->interface->name);
}
- thread_add_timer(router->master, on_assert_timer, ch, interval,
- &ch->t_ifassert_timer);
+ event_add_timer(router->master, on_assert_timer, ch, interval,
+ &ch->t_ifassert_timer);
}
static void pim_assert_timer_reset(struct pim_ifchannel *ch)
zlog_debug(
"%s : starting bs timer for scope %d with timeout %d secs",
__func__, scope->sz_id, bs_timeout);
- thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
- &scope->bs_timer);
+ event_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+ &scope->bs_timer);
}
static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
__func__, &bsrp->bsgrp_node->group, &bsrp->rp_address,
hold_time, bsrp->rp_holdtime);
- thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
- &bsrp->g2rp_timer);
+ event_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+ &bsrp->g2rp_timer);
}
static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
}
if (holdtime != 0xFFFF) {
- thread_add_timer(router->master, on_ifjoin_expiry_timer, ch,
- holdtime, &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer, ch,
+ holdtime, &ch->t_ifjoin_expiry_timer);
}
}
THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer_msec(
- router->master, on_ifjoin_prune_pending_timer,
- ch, jp_override_interval_msec,
- &ch->t_ifjoin_prune_pending_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ event_add_timer_msec(router->master,
+ on_ifjoin_prune_pending_timer, ch,
+ jp_override_interval_msec,
+ &ch->t_ifjoin_prune_pending_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
pim_upstream_update_join_desired(pim_ifp->pim,
ch->upstream);
}
be taken not to use "ch" afterwards since it would be
deleted. */
THREAD_OFF(ch->t_ifjoin_prune_pending_timer);
- thread_add_timer_msec(router->master,
- on_ifjoin_prune_pending_timer, ch,
- jp_override_interval_msec,
- &ch->t_ifjoin_prune_pending_timer);
+ event_add_timer_msec(router->master,
+ on_ifjoin_prune_pending_timer, ch,
+ jp_override_interval_msec,
+ &ch->t_ifjoin_prune_pending_timer);
break;
case PIM_IFJOIN_PRUNE:
if (source_flags & PIM_ENCODE_RPT_BIT) {
THREAD_OFF(ch->t_ifjoin_expiry_timer);
}
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
case PIM_IFJOIN_PRUNE_TMP:
if (source_flags & PIM_ENCODE_RPT_BIT) {
ch->ifjoin_state = PIM_IFJOIN_PRUNE;
THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
case PIM_IFJOIN_PRUNE_PENDING_TMP:
if (source_flags & PIM_ENCODE_RPT_BIT) {
ch->ifjoin_state = PIM_IFJOIN_PRUNE_PENDING;
THREAD_OFF(ch->t_ifjoin_expiry_timer);
- thread_add_timer(router->master, on_ifjoin_expiry_timer,
- ch, holdtime,
- &ch->t_ifjoin_expiry_timer);
+ event_add_timer(router->master, on_ifjoin_expiry_timer,
+ ch, holdtime,
+ &ch->t_ifjoin_expiry_timer);
}
break;
}
other_querier_present_interval_msec % 1000);
}
- thread_add_timer_msec(router->master, pim_igmp_other_querier_expire,
- igmp, other_querier_present_interval_msec,
- &igmp->t_other_querier_timer);
+ event_add_timer_msec(router->master, pim_igmp_other_querier_expire,
+ igmp, other_querier_present_interval_msec,
+ &igmp->t_other_querier_timer);
}
void pim_igmp_other_querier_timer_off(struct gm_sock *igmp)
ifaddr_str, query_interval,
startup_mode ? "startup" : "non-startup", igmp->fd);
}
- thread_add_timer(router->master, pim_igmp_general_query, igmp,
- query_interval, &igmp->t_igmp_query_timer);
+ event_add_timer(router->master, pim_igmp_general_query, igmp,
+ query_interval, &igmp->t_igmp_query_timer);
}
void pim_igmp_general_query_off(struct gm_sock *igmp)
zlog_debug("Scheduling READ event on IGMP socket fd=%d",
igmp->fd);
}
- thread_add_read(router->master, pim_igmp_read, igmp, igmp->fd,
- &igmp->t_igmp_read);
+ event_add_read(router->master, pim_igmp_read, igmp, igmp->fd,
+ &igmp->t_igmp_read);
}
struct gm_sock *pim_igmp_sock_add(struct list *igmp_sock_list,
*/
assert(group->group_filtermode_isexcl);
- thread_add_timer_msec(router->master, igmp_group_timer, group,
- interval_msec, &group->t_group_timer);
+ event_add_timer_msec(router->master, igmp_group_timer, group,
+ interval_msec, &group->t_group_timer);
}
struct gm_group *find_group_by_addr(struct gm_sock *igmp,
source_str, group->interface->name);
}
- thread_add_timer_msec(router->master, igmp_source_timer, source,
- interval_msec, &source->t_source_timer);
+ event_add_timer_msec(router->master, igmp_source_timer, source,
+ interval_msec, &source->t_source_timer);
/*
RFC 3376: 6.3. IGMPv3 Source-Specific Forwarding Rules
group->interface->name);
}
- thread_add_timer_msec(router->master, igmp_group_retransmit, group,
- lmqi_msec,
- &group->t_group_query_retransmit_timer);
+ event_add_timer_msec(router->master, igmp_group_retransmit, group,
+ lmqi_msec, &group->t_group_query_retransmit_timer);
}
static long igmp_group_timer_remain_msec(struct gm_group *group)
router->mlag_process_register = true;
- thread_add_event(router->master, pim_mlag_register_handler, NULL, 0,
- NULL);
+ event_add_event(router->master, pim_mlag_register_handler, NULL, 0,
+ NULL);
}
static void pim_mlag_deregister_handler(struct event *thread)
router->mlag_process_register = false;
- thread_add_event(router->master, pim_mlag_deregister_handler, NULL, 0,
- NULL);
+ event_add_event(router->master, pim_mlag_deregister_handler, NULL, 0,
+ NULL);
}
void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp)
static void mroute_read_on(struct pim_instance *pim)
{
- thread_add_read(router->master, mroute_read, pim, pim->mroute_socket,
- &pim->thread);
+ event_add_read(router->master, mroute_read, pim, pim->mroute_socket,
+ &pim->thread);
}
static void mroute_read_off(struct pim_instance *pim)
{
THREAD_OFF(pim->msdp.sa_adv_timer);
if (start) {
- thread_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb,
- pim, PIM_MSDP_SA_ADVERTISMENT_TIME,
- &pim->msdp.sa_adv_timer);
+ event_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb, pim,
+ PIM_MSDP_SA_ADVERTISMENT_TIME,
+ &pim->msdp.sa_adv_timer);
}
}
{
THREAD_OFF(sa->sa_state_timer);
if (start) {
- thread_add_timer(sa->pim->msdp.master,
- pim_msdp_sa_state_timer_cb, sa,
- PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
+ event_add_timer(sa->pim->msdp.master,
+ pim_msdp_sa_state_timer_cb, sa,
+ PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
}
}
struct pim_instance *pim = mp->pim;
THREAD_OFF(mp->hold_timer);
if (start) {
- thread_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
- mp, pim->msdp.hold_time, &mp->hold_timer);
+ event_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
+ mp, pim->msdp.hold_time, &mp->hold_timer);
}
}
{
THREAD_OFF(mp->ka_timer);
if (start) {
- thread_add_timer(mp->pim->msdp.master,
- pim_msdp_peer_ka_timer_cb, mp,
- mp->pim->msdp.keep_alive, &mp->ka_timer);
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_ka_timer_cb,
+ mp, mp->pim->msdp.keep_alive, &mp->ka_timer);
}
}
{
THREAD_OFF(mp->cr_timer);
if (start) {
- thread_add_timer(mp->pim->msdp.master,
- pim_msdp_peer_cr_timer_cb, mp,
- mp->pim->msdp.connection_retry, &mp->cr_timer);
+ event_add_timer(mp->pim->msdp.master, pim_msdp_peer_cr_timer_cb,
+ mp, mp->pim->msdp.connection_retry,
+ &mp->cr_timer);
}
}
};
#define PIM_MSDP_PEER_READ_ON(mp) \
- thread_add_read(mp->pim->msdp.master, pim_msdp_read, mp, mp->fd, \
- &mp->t_read)
+ event_add_read(mp->pim->msdp.master, pim_msdp_read, mp, mp->fd, \
+ &mp->t_read)
#define PIM_MSDP_PEER_WRITE_ON(mp) \
- thread_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
- &mp->t_write)
+ event_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
+ &mp->t_write)
#define PIM_MSDP_PEER_READ_OFF(mp) thread_cancel(&mp->t_read)
#define PIM_MSDP_PEER_WRITE_OFF(mp) thread_cancel(&mp->t_write)
return;
}
pim->msdp.listener.thread = NULL;
- thread_add_read(router->master, pim_msdp_sock_accept, pim, accept_sock,
- &pim->msdp.listener.thread);
+ event_add_read(router->master, pim_msdp_sock_accept, pim, accept_sock,
+ &pim->msdp.listener.thread);
/* accept client connection. */
msdp_sock = sockunion_accept(accept_sock, &su);
/* add accept thread */
listener->fd = sock;
memcpy(&listener->su, &sin, socklen);
- thread_add_read(pim->msdp.master, pim_msdp_sock_accept, pim, sock,
- &listener->thread);
+ event_add_read(pim->msdp.master, pim_msdp_sock_accept, pim, sock,
+ &listener->thread);
pim->msdp.flags |= PIM_MSDPF_LISTENER;
return 0;
__func__, neigh->holdtime, &neigh->source_addr,
neigh->interface->name);
- thread_add_timer(router->master, on_neighbor_timer, neigh,
- neigh->holdtime, &neigh->t_expire_timer);
+ event_add_timer(router->master, on_neighbor_timer, neigh,
+ neigh->holdtime, &neigh->t_expire_timer);
}
static void on_neighbor_jp_timer(struct event *t)
rpf.rpf_addr = neigh->source_addr;
pim_joinprune_send(&rpf, neigh->upstream_jp_agg);
- thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
- router->t_periodic, &neigh->jp_timer);
+ event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+ router->t_periodic, &neigh->jp_timer);
}
static void pim_neighbor_start_jp_timer(struct pim_neighbor *neigh)
{
THREAD_OFF(neigh->jp_timer);
- thread_add_timer(router->master, on_neighbor_jp_timer, neigh,
- router->t_periodic, &neigh->jp_timer);
+ event_add_timer(router->master, on_neighbor_jp_timer, neigh,
+ router->t_periodic, &neigh->jp_timer);
}
static struct pim_neighbor *
zlog_debug("Scheduling READ event on PIM socket fd=%d",
pim_ifp->pim_sock_fd);
}
- thread_add_read(router->master, pim_sock_read, ifp,
- pim_ifp->pim_sock_fd, &pim_ifp->t_pim_sock_read);
+ event_add_read(router->master, pim_sock_read, ifp, pim_ifp->pim_sock_fd,
+ &pim_ifp->t_pim_sock_read);
}
static int pim_sock_open(struct interface *ifp)
pim_ifp->pim_hello_period, ifp->name);
}
THREAD_OFF(pim_ifp->t_pim_hello_timer);
- thread_add_timer(router->master, on_pim_hello_send, ifp,
- pim_ifp->pim_hello_period,
- &pim_ifp->t_pim_hello_timer);
+ event_add_timer(router->master, on_pim_hello_send, ifp,
+ pim_ifp->pim_hello_period, &pim_ifp->t_pim_hello_timer);
}
/*
random_msec, ifp->name);
}
- thread_add_timer_msec(router->master, on_pim_hello_send, ifp,
- random_msec, &pim_ifp->t_pim_hello_timer);
+ event_add_timer_msec(router->master, on_pim_hello_send, ifp,
+ random_msec, &pim_ifp->t_pim_hello_timer);
}
int pim_sock_add(struct interface *ifp)
static void ssmpingd_read_on(struct ssmpingd_sock *ss)
{
- thread_add_read(router->master, ssmpingd_sock_read, ss, ss->sock_fd,
- &ss->t_sock_read);
+ event_add_read(router->master, ssmpingd_sock_read, ss, ss->sock_fd,
+ &ss->t_sock_read);
}
static struct ssmpingd_sock *ssmpingd_new(struct pim_instance *pim,
pim_jp_agg_add_group(nbr->upstream_jp_agg, up, 1, nbr);
else {
THREAD_OFF(up->t_join_timer);
- thread_add_timer(router->master, on_join_timer, up,
- router->t_periodic, &up->t_join_timer);
+ event_add_timer(router->master, on_join_timer, up,
+ router->t_periodic, &up->t_join_timer);
}
pim_jp_agg_upstream_verification(up, true);
}
}
THREAD_OFF(up->t_join_timer);
- thread_add_timer_msec(router->master, on_join_timer, up, interval_msec,
- &up->t_join_timer);
+ event_add_timer_msec(router->master, on_join_timer, up, interval_msec,
+ &up->t_join_timer);
}
void pim_update_suppress_timers(uint32_t suppress_time)
up->sg_str);
}
THREAD_OFF(up->t_ka_timer);
- thread_add_timer(router->master, pim_upstream_keep_alive_timer, up,
- time, &up->t_ka_timer);
+ event_add_timer(router->master, pim_upstream_keep_alive_timer, up, time,
+ &up->t_ka_timer);
/* any time keepalive is started against a SG we will have to
* re-evaluate our active source database */
void pim_upstream_msdp_reg_timer_start(struct pim_upstream *up)
{
THREAD_OFF(up->t_msdp_reg_timer);
- thread_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
- PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
+ event_add_timer(router->master, pim_upstream_msdp_reg_timer, up,
+ PIM_MSDP_REG_RXED_PERIOD, &up->t_msdp_reg_timer);
pim_msdp_sa_local_update(up);
}
"%s: (S,G)=%s Starting upstream register stop timer %d",
__func__, up->sg_str, time);
}
- thread_add_timer(router->master, pim_upstream_register_stop_timer, up,
- time, &up->t_rs_timer);
+ event_add_timer(router->master, pim_upstream_register_stop_timer, up,
+ time, &up->t_rs_timer);
}
int pim_upstream_inherited_olist_decide(struct pim_instance *pim,
{
THREAD_OFF(vxlan_info.work_timer);
if (start)
- thread_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
- PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
+ event_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
+ PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
}
/**************************** vxlan origination mroutes ***********************
router->rpf_cache_refresh_delay_msec);
}
- thread_add_timer_msec(router->master, on_rpf_cache_refresh, pim,
- router->rpf_cache_refresh_delay_msec,
- &pim->rpf_cache_refresher);
+ event_add_timer_msec(router->master, on_rpf_cache_refresh, pim,
+ router->rpf_cache_refresh_delay_msec,
+ &pim->rpf_cache_refresher);
}
static void pim_zebra_connected(struct zclient *zclient)
return;
}
- thread_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
- &zlookup_read);
+ event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
+ &zlookup_read);
}
/* Schedule connection with delay. */
static void zclient_lookup_sched(struct zclient *zlookup, int delay)
{
- thread_add_timer(router->master, zclient_lookup_connect, zlookup, delay,
- &zlookup->t_connect);
+ event_add_timer(router->master, zclient_lookup_connect, zlookup, delay,
+ &zlookup->t_connect);
zlog_notice("%s: zclient lookup connection scheduled for %d seconds",
__func__, delay);
/* Schedule connection for now. */
static void zclient_lookup_sched_now(struct zclient *zlookup)
{
- thread_add_event(router->master, zclient_lookup_connect, zlookup, 0,
- &zlookup->t_connect);
+ event_add_event(router->master, zclient_lookup_connect, zlookup, 0,
+ &zlookup->t_connect);
zlog_notice("%s: zclient lookup immediate connection scheduled",
__func__);
}
zclient_lookup_nexthop_once(pim, nexthop_tab, 10, l);
- thread_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
- &zlookup_read);
+ event_add_timer(router->master, zclient_lookup_read_pipe, zlookup, 60,
+ &zlookup_read);
}
int zclient_lookup_nexthop(struct pim_instance *pim,
if (PIM_DEBUG_MLAG)
zlog_debug(":%s: Scheduling PIM MLAG write Thread",
__func__);
- thread_add_event(router->master, pim_mlag_zthread_handler, NULL,
- 0, &router->zpthread_mlag_write);
+ event_add_event(router->master, pim_mlag_zthread_handler, NULL,
+ 0, &router->zpthread_mlag_write);
}
return (0);
}
zlog_debug("turn on %s", ifp->name);
/* Add interface wake up thread. */
- thread_add_timer(master, rip_interface_wakeup, ifp, 1,
- &ri->t_wakeup);
+ event_add_timer(master, rip_interface_wakeup, ifp, 1,
+ &ri->t_wakeup);
rip_connect_set(ifp, 1);
} else if (ri->running) {
/* Might as well clean up the route table as well
}
/* Update timeout thread. */
- thread_add_timer(master, rip_peer_timeout, peer, RIP_PEER_TIMER_DEFAULT,
- &peer->t_timeout);
+ event_add_timer(master, rip_peer_timeout, peer, RIP_PEER_TIMER_DEFAULT,
+ &peer->t_timeout);
/* Last update time set. */
time(&peer->uptime);
{
if (rinfo->metric != RIP_METRIC_INFINITY) {
THREAD_OFF(rinfo->t_timeout);
- thread_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
- &rinfo->t_timeout);
+ event_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
+ &rinfo->t_timeout);
}
}
update is triggered when the timer expires. */
interval = (frr_weak_random() % 5) + 1;
- thread_add_timer(master, rip_triggered_interval, rip, interval,
- &rip->t_triggered_interval);
+ event_add_timer(master, rip_triggered_interval, rip, interval,
+ &rip->t_triggered_interval);
}
/* Withdraw redistributed route. */
switch (event) {
case RIP_READ:
- thread_add_read(master, rip_read, rip, sock, &rip->t_read);
+ event_add_read(master, rip_read, rip, sock, &rip->t_read);
break;
case RIP_UPDATE_EVENT:
THREAD_OFF(rip->t_update);
jitter = rip_update_jitter(rip->update_time);
- thread_add_timer(master, rip_update, rip,
- sock ? 2 : rip->update_time + jitter,
- &rip->t_update);
+ event_add_timer(master, rip_update, rip,
+ sock ? 2 : rip->update_time + jitter,
+ &rip->t_update);
break;
case RIP_TRIGGERED_UPDATE:
if (rip->t_triggered_interval)
rip->trigger = 1;
else
- thread_add_event(master, rip_triggered_update, rip, 0,
- &rip->t_triggered_update);
+ event_add_event(master, rip_triggered_update, rip, 0,
+ &rip->t_triggered_update);
break;
default:
break;
};
/* Macro for timer turn on. */
-#define RIP_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
+#define RIP_TIMER_ON(T, F, V) event_add_timer(master, (F), rinfo, (V), &(T))
#define RIP_OFFSET_LIST_IN 0
#define RIP_OFFSET_LIST_OUT 1
zlog_info("RIPng INTERFACE ON %s", ifp->name);
/* Add interface wake up thread. */
- thread_add_timer(master, ripng_interface_wakeup, ifp, 1,
- &ri->t_wakeup);
+ event_add_timer(master, ripng_interface_wakeup, ifp, 1,
+ &ri->t_wakeup);
ripng_connect_set(ifp, 1);
} else {
}
/* Update timeout thread. */
- thread_add_timer(master, ripng_peer_timeout, peer,
- RIPNG_PEER_TIMER_DEFAULT, &peer->t_timeout);
+ event_add_timer(master, ripng_peer_timeout, peer,
+ RIPNG_PEER_TIMER_DEFAULT, &peer->t_timeout);
/* Last update time set. */
time(&peer->uptime);
{
if (rinfo->metric != RIPNG_METRIC_INFINITY) {
THREAD_OFF(rinfo->t_timeout);
- thread_add_timer(master, ripng_timeout, rinfo,
- ripng->timeout_time, &rinfo->t_timeout);
+ event_add_timer(master, ripng_timeout, rinfo,
+ ripng->timeout_time, &rinfo->t_timeout);
}
}
update is triggered when the timer expires. */
interval = (frr_weak_random() % 5) + 1;
- thread_add_timer(master, ripng_triggered_interval, ripng, interval,
- &ripng->t_triggered_interval);
+ event_add_timer(master, ripng_triggered_interval, ripng, interval,
+ &ripng->t_triggered_interval);
}
/* Write routing table entry to the stream and return next index of
switch (event) {
case RIPNG_READ:
- thread_add_read(master, ripng_read, ripng, sock,
- &ripng->t_read);
+ event_add_read(master, ripng_read, ripng, sock, &ripng->t_read);
break;
case RIPNG_UPDATE_EVENT:
THREAD_OFF(ripng->t_update);
/* Update timer jitter. */
jitter = ripng_update_jitter(ripng->update_time);
- thread_add_timer(master, ripng_update, ripng,
- sock ? 2 : ripng->update_time + jitter,
- &ripng->t_update);
+ event_add_timer(master, ripng_update, ripng,
+ sock ? 2 : ripng->update_time + jitter,
+ &ripng->t_update);
break;
case RIPNG_TRIGGERED_UPDATE:
if (ripng->t_triggered_interval)
ripng->trigger = 1;
else
- thread_add_event(master, ripng_triggered_update, ripng,
- 0, &ripng->t_triggered_update);
+ event_add_event(master, ripng_triggered_update, ripng,
+ 0, &ripng->t_triggered_update);
break;
case RIPNG_ZEBRA:
case RIPNG_REQUEST_EVENT:
};
/* RIPng timer on/off macro. */
-#define RIPNG_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
+#define RIPNG_TIMER_ON(T, F, V) event_add_timer(master, (F), rinfo, (V), &(T))
#define RIPNG_OFFSET_LIST_IN 0
#define RIPNG_OFFSET_LIST_OUT 1
getrusage(RUSAGE_SELF, &lp_rusage);
#endif
- thread_add_timer_msec(master, logpump_done, NULL, 0, NULL);
+ event_add_timer_msec(master, logpump_done, NULL, 0, NULL);
return NULL;
}
int *count = THREAD_ARG(thread);
printf("run %d of timer\n", (*count)++);
- thread_add_timer(master, test_timer, count, 5, NULL);
+ event_add_timer(master, test_timer, count, 5, NULL);
}
static void test_timer_init(void)
{
- thread_add_timer(master, test_timer, &timer_count, 10, NULL);
+ event_add_timer(master, test_timer, &timer_count, 10, NULL);
}
static void test_vty_init(void)
// Signal FRR event loop to stop
test_debug("client: pthread: adding event to stop us");
- thread_add_event(master, grpc_thread_stop, NULL, 0, NULL);
+ event_add_event(master, grpc_thread_stop, NULL, 0, NULL);
test_debug("client: pthread: DONE (returning)");
static_startup();
- thread_add_event(master, grpc_thread_start, NULL, 0, NULL);
+ event_add_event(master, grpc_thread_start, NULL, 0, NULL);
/* Event Loop */
struct event thread;
slow_func(ws->vty, ws->str, ws->i);
ws->i++;
if (thread_should_yield(thread)) {
- thread_add_timer_msec(master, clear_something, ws, 0,
- NULL);
+ event_add_timer_msec(master, clear_something, ws, 0,
+ NULL);
return;
}
}
ws->vty = vty;
ws->i = ITERS_FIRST;
- thread_add_timer_msec(master, clear_something, ws, 0, NULL);
+ event_add_timer_msec(master, clear_something, ws, 0, NULL);
return CMD_SUCCESS;
}
/* Schedule timers to expire in 0..5 seconds */
interval_msec = prng_rand(prng) % 5000;
arg = XMALLOC(MTYPE_TMP, TIMESTR_LEN + 1);
- thread_add_timer_msec(master, timer_func, arg, interval_msec,
- &timers[i]);
+ event_add_timer_msec(master, timer_func, arg, interval_msec,
+ &timers[i]);
ret = snprintf(arg, TIMESTR_LEN + 1, "%lld.%06lld",
(long long)timers[i]->u.sands.tv_sec,
(long long)timers[i]->u.sands.tv_usec);
/* create thread structures so they won't be allocated during the
* time measurement */
for (i = 0; i < SCHEDULE_TIMERS; i++) {
- thread_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
+ event_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
}
for (i = 0; i < SCHEDULE_TIMERS; i++)
thread_cancel(&timers[i]);
long interval_msec;
interval_msec = prng_rand(prng) % (100 * SCHEDULE_TIMERS);
- thread_add_timer_msec(master, dummy_func, NULL, interval_msec,
- &timers[i]);
+ event_add_timer_msec(master, dummy_func, NULL, interval_msec,
+ &timers[i]);
}
monotime(&tv_lap);
printf("server recv: %s\n", buf);
fflush(stdout);
- frrzmq_thread_add_write_msg(master, serverwritefn, NULL, msg_id,
- zmqsock, &cb);
+ frrzmq_event_add_write_msg(master, serverwritefn, NULL, msg_id, zmqsock,
+ &cb);
}
static void serverfn(void *arg, void *zmqsock)
frrzmq_thread_cancel(&cb, &cb->read);
frrzmq_thread_cancel(&cb, &cb->write);
- frrzmq_thread_add_read_part(master, serverpartfn, NULL, NULL, zmqsock,
- &cb);
+ frrzmq_event_add_read_part(master, serverpartfn, NULL, NULL, zmqsock,
+ &cb);
}
static void sigchld(void)
exit(1);
}
- frrzmq_thread_add_read_msg(master, serverfn, NULL, NULL, zmqsock, &cb);
+ frrzmq_event_add_read_msg(master, serverfn, NULL, NULL, zmqsock, &cb);
write(syncfd, &dummy, sizeof(dummy));
while (thread_fetch(master, &t))
{
#define thread_prefix "_"
static const char *const names[] = {
- thread_prefix "thread_add_read_write",
- thread_prefix "thread_add_timer",
- thread_prefix "thread_add_timer_msec",
- thread_prefix "thread_add_timer_tv",
- thread_prefix "thread_add_event",
+ thread_prefix "event_add_read_write",
+ thread_prefix "event_add_timer",
+ thread_prefix "event_add_timer_msec",
+ thread_prefix "event_add_timer_tv",
+ thread_prefix "event_add_event",
thread_prefix "thread_execute",
};
size_t i;
if (pkt->hdr.priority == 0) {
vrrp_send_advertisement(r);
THREAD_OFF(r->t_adver_timer);
- thread_add_timer_msec(
- master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval *
+ CS2MS,
+ &r->t_adver_timer);
} else if (pkt->hdr.priority > r->priority
|| ((pkt->hdr.priority == r->priority)
&& addrcmp > 0)) {
}
vrrp_recalculate_timers(r);
THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(master,
- vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ event_add_timer_msec(master,
+ vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
vrrp_change_state(r, VRRP_STATE_BACKUP);
} else {
/* Discard advertisement */
case VRRP_STATE_BACKUP:
if (pkt->hdr.priority == 0) {
THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(
+ event_add_timer_msec(
master, vrrp_master_down_timer_expire, r,
r->skew_time * CS2MS, &r->t_master_down_timer);
} else if (!r->vr->preempt_mode
}
vrrp_recalculate_timers(r);
THREAD_OFF(r->t_master_down_timer);
- thread_add_timer_msec(master,
- vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ event_add_timer_msec(master,
+ vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
} else if (r->vr->preempt_mode
&& pkt->hdr.priority < r->priority) {
/* Discard advertisement */
memset(r->ibuf, 0x00, sizeof(r->ibuf));
if (resched)
- thread_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
+ event_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
}
/*
vrrp_send_advertisement(r);
/* Reset the Adver_Timer to Advertisement_Interval */
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
} else {
zlog_err(VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
"Adver_Timer expired in state '%s'; this is a bug",
"Master_Down_Timer expired",
r->vr->vrid, family2str(r->family));
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
vrrp_change_state(r, VRRP_STATE_MASTER);
}
}
/* Schedule listener */
- thread_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
+ event_add_read(master, vrrp_read, r, r->sock_rx, &r->t_read);
/* Configure effective priority */
assert(listhead(r->addrs));
}
if (r->priority == VRRP_PRIO_MASTER) {
- thread_add_timer_msec(master, vrrp_adver_timer_expire, r,
- r->vr->advertisement_interval * CS2MS,
- &r->t_adver_timer);
+ event_add_timer_msec(master, vrrp_adver_timer_expire, r,
+ r->vr->advertisement_interval * CS2MS,
+ &r->t_adver_timer);
vrrp_change_state(r, VRRP_STATE_MASTER);
} else {
r->master_adver_interval = r->vr->advertisement_interval;
vrrp_recalculate_timers(r);
- thread_add_timer_msec(master, vrrp_master_down_timer_expire, r,
- r->master_down_interval * CS2MS,
- &r->t_master_down_timer);
+ event_add_timer_msec(master, vrrp_master_down_timer_expire, r,
+ r->master_down_interval * CS2MS,
+ &r->t_master_down_timer);
vrrp_change_state(r, VRRP_STATE_BACKUP);
}
const char *text;
ssize_t ret;
- thread_add_read(master, vtysh_log_read, vclient, vclient->log_fd,
- &vclient->log_reader);
+ event_add_read(master, vtysh_log_read, vclient, vclient->log_fd,
+ &vclient->log_reader);
ret = recv(vclient->log_fd, &buf, sizeof(buf), 0);
if (fd != -1) {
set_nonblocking(fd);
vclient->log_fd = fd;
- thread_add_read(master, vtysh_log_read, vclient,
- vclient->log_fd,
- &vclient->log_reader);
+ event_add_read(master, vtysh_log_read, vclient,
+ vclient->log_fd,
+ &vclient->log_reader);
}
if (ret != CMD_SUCCESS) {
vty_out(vty, "%% failed to enable logs on %s\n",
static void vtysh_rl_read(struct event *thread)
{
- thread_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
- &vtysh_rl_read_thread);
+ event_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
+ &vtysh_rl_read_thread);
rl_callback_read_char();
}
master = thread_master_create(NULL);
rl_callback_handler_install(vtysh_prompt(), vtysh_rl_callback);
- thread_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
- &vtysh_rl_read_thread);
+ event_add_read(master, vtysh_rl_read, NULL, STDIN_FILENO,
+ &vtysh_rl_read_thread);
while (!vtysh_loop_exited && thread_fetch(master, &thread))
thread_call(&thread);
zlog_err(
"%s %s child process appears to still be reading configuration, delaying for another %lu time",
restart->what, restart->name, gs.restart_timeout);
- thread_add_timer(master, restart_kill, restart,
- gs.restart_timeout, &restart->t_kill);
+ event_add_timer(master, restart_kill, restart,
+ gs.restart_timeout, &restart->t_kill);
return;
}
(long)delay.tv_sec, (restart->kills ? SIGKILL : SIGTERM));
kill(-restart->pid, (restart->kills ? SIGKILL : SIGTERM));
restart->kills++;
- thread_add_timer(master, restart_kill, restart, gs.restart_timeout,
- &restart->t_kill);
+ event_add_timer(master, restart_kill, restart, gs.restart_timeout,
+ &restart->t_kill);
}
static struct restart_info *find_child(pid_t child)
snprintf(cmd, sizeof(cmd), command, restart->name);
#pragma GCC diagnostic pop
if ((restart->pid = run_background(cmd)) > 0) {
- thread_add_timer(master, restart_kill, restart,
- gs.restart_timeout, &restart->t_kill);
+ event_add_timer(master, restart_kill, restart,
+ gs.restart_timeout, &restart->t_kill);
restart->what = cmdtype;
gs.numpids++;
} else
#define SET_READ_HANDLER(DMN) \
do { \
(DMN)->t_read = NULL; \
- thread_add_read(master, handle_read, (DMN), (DMN)->fd, \
- &(DMN)->t_read); \
+ event_add_read(master, handle_read, (DMN), (DMN)->fd, \
+ &(DMN)->t_read); \
} while (0);
#define SET_WAKEUP_DOWN(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_down, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_down, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
#define SET_WAKEUP_UNRESPONSIVE(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_unresponsive, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_unresponsive, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
#define SET_WAKEUP_ECHO(DMN) \
do { \
(DMN)->t_wakeup = NULL; \
- thread_add_timer_msec(master, wakeup_send_echo, (DMN), \
- FUZZY(gs.period), &(DMN)->t_wakeup); \
+ event_add_timer_msec(master, wakeup_send_echo, (DMN), \
+ FUZZY(gs.period), &(DMN)->t_wakeup); \
} while (0);
static void wakeup_down(struct event *t_wakeup)
THREAD_OFF(gs.t_operational);
- thread_add_timer(master, daemon_restarting_operational, NULL,
- gs.operational_timeout, &gs.t_operational);
+ event_add_timer(master, daemon_restarting_operational, NULL,
+ gs.operational_timeout, &gs.t_operational);
}
SET_WAKEUP_ECHO(dmn);
zlog_debug("%s: connection in progress", dmn->name);
dmn->state = DAEMON_CONNECTING;
dmn->fd = sock;
- thread_add_write(master, check_connect, dmn, dmn->fd,
- &dmn->t_write);
- thread_add_timer(master, wakeup_connect_hanging, dmn,
- gs.timeout, &dmn->t_wakeup);
+ event_add_write(master, check_connect, dmn, dmn->fd,
+ &dmn->t_write);
+ event_add_timer(master, wakeup_connect_hanging, dmn, gs.timeout,
+ &dmn->t_wakeup);
SET_READ_HANDLER(dmn);
return 0;
}
gs.phase = new_phase;
thread_cancel(&gs.t_phase_hanging);
- thread_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
- &gs.t_phase_hanging);
+ event_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
+ &gs.t_phase_hanging);
}
static void phase_check(void)
daemon_down(dmn, why);
} else {
gettimeofday(&dmn->echo_sent, NULL);
- thread_add_timer(master, wakeup_no_answer, dmn, gs.timeout,
- &dmn->t_wakeup);
+ event_add_timer(master, wakeup_no_answer, dmn, gs.timeout,
+ &dmn->t_wakeup);
}
}
struct daemon *dmn, **add = &gs.daemons;
char alldaemons[512] = "", *p = alldaemons;
- thread_add_timer_msec(master, startup_timeout, NULL, STARTUP_TIMEOUT,
- &gs.t_startup_timeout);
+ event_add_timer_msec(master, startup_timeout, NULL, STARTUP_TIMEOUT,
+ &gs.t_startup_timeout);
for (i = optind; i < argc; i++) {
dmn = XCALLOC(MTYPE_WATCHFRR_DAEMON, sizeof(*dmn));
gs.numdaemons++;
gs.numdown++;
dmn->fd = -1;
- thread_add_timer_msec(master, wakeup_init, dmn, 0,
- &dmn->t_wakeup);
+ event_add_timer_msec(master, wakeup_init, dmn, 0,
+ &dmn->t_wakeup);
dmn->restart.interval = gs.min_restart_interval;
*add = dmn;
add = &dmn->next;
};
#define FPM_RECONNECT(fnc) \
- thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
- FNE_INTERNAL_RECONNECT, &(fnc)->t_event)
+ event_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
+ FNE_INTERNAL_RECONNECT, &(fnc)->t_event)
#define WALK_FINISH(fnc, ev) \
- thread_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
- (ev), NULL)
+ event_add_event((fnc)->fthread->master, fpm_process_event, (fnc), \
+ (ev), NULL)
/*
* Prototypes.
memcpy(&sin6->sin6_addr, naddr, sizeof(sin6->sin6_addr));
ask_reconnect:
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_RECONNECT, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_RECONNECT, &gfnc->t_event);
return CMD_SUCCESS;
}
"FPM remote listening server port\n"
"Remote FPM server port\n")
{
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_DISABLE, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_DISABLE, &gfnc->t_event);
return CMD_SUCCESS;
}
if (gfnc->use_nhg)
return CMD_SUCCESS;
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_nhg);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
if (!gfnc->use_nhg)
return CMD_SUCCESS;
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_TOGGLE_NHG, &gfnc->t_nhg);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_TOGGLE_NHG, &gfnc->t_nhg);
return CMD_SUCCESS;
}
FPM_STR
"FPM statistic counters\n")
{
- thread_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
- FNE_RESET_COUNTERS, &gfnc->t_event);
+ event_add_event(gfnc->fthread->master, fpm_process_event, gfnc,
+ FNE_RESET_COUNTERS, &gfnc->t_event);
return CMD_SUCCESS;
}
if (fnc->disabled)
return;
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
}
static void fpm_read(struct event *t)
}
/* Schedule the next read */
- thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
- &fnc->t_read);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
+ &fnc->t_read);
/* We've got an interruption. */
if (rv == -2)
* Starting with LSPs walk all FPM objects, marking them
* as unsent and then replaying them.
*/
- thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
- &fnc->t_lspreset);
+ event_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
+ &fnc->t_lspreset);
/* Permit receiving messages now. */
- thread_add_read(fnc->fthread->master, fpm_read, fnc,
- fnc->socket, &fnc->t_read);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket,
+ &fnc->t_read);
}
frr_mutex_lock_autounlock(&fnc->obuf_mutex);
/* Stream is not empty yet, we must schedule more writes. */
if (STREAM_READABLE(fnc->obuf)) {
stream_pulldown(fnc->obuf);
- thread_add_write(fnc->fthread->master, fpm_write, fnc,
- fnc->socket, &fnc->t_write);
+ event_add_write(fnc->fthread->master, fpm_write, fnc,
+ fnc->socket, &fnc->t_write);
return;
}
}
if (sock == -1) {
zlog_err("%s: fpm socket failed: %s", __func__,
strerror(errno));
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
return;
}
close(sock);
zlog_warn("%s: fpm connection failed: %s", __func__,
strerror(errno));
- thread_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
- &fnc->t_connect);
+ event_add_timer(fnc->fthread->master, fpm_connect, fnc, 3,
+ &fnc->t_connect);
return;
}
fnc->connecting = (errno == EINPROGRESS);
fnc->socket = sock;
if (!fnc->connecting)
- thread_add_read(fnc->fthread->master, fpm_read, fnc, sock,
- &fnc->t_read);
- thread_add_write(fnc->fthread->master, fpm_write, fnc, sock,
- &fnc->t_write);
+ event_add_read(fnc->fthread->master, fpm_read, fnc, sock,
+ &fnc->t_read);
+ event_add_write(fnc->fthread->master, fpm_write, fnc, sock,
+ &fnc->t_write);
/*
* Starting with LSPs walk all FPM objects, marking them
* If we are not connected, then delay the objects reset/send.
*/
if (!fnc->connecting)
- thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
- &fnc->t_lspreset);
+ event_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0,
+ &fnc->t_lspreset);
}
/**
memory_order_relaxed);
/* Tell the thread to start writing. */
- thread_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
- &fnc->t_write);
+ event_add_write(fnc->fthread->master, fpm_write, fnc, fnc->socket,
+ &fnc->t_write);
return 0;
}
WALK_FINISH(fnc, FNE_LSP_FINISHED);
/* Now move onto routes */
- thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
- &fnc->t_nhgreset);
+ event_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0,
+ &fnc->t_nhgreset);
} else {
/* Didn't finish - reschedule LSP walk */
- thread_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
- &fnc->t_lspwalk);
+ event_add_timer(zrouter.master, fpm_lsp_send, fnc, 0,
+ &fnc->t_lspwalk);
}
}
/* We are done sending next hops, lets install the routes now. */
if (fna.complete) {
WALK_FINISH(fnc, FNE_NHG_FINISHED);
- thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
- &fnc->t_ribreset);
+ event_add_timer(zrouter.master, fpm_rib_reset, fnc, 0,
+ &fnc->t_ribreset);
} else /* Otherwise reschedule next hop group again. */
- thread_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
- &fnc->t_nhgwalk);
+ event_add_timer(zrouter.master, fpm_nhg_send, fnc, 0,
+ &fnc->t_nhgwalk);
}
/**
/* Free the temporary allocated context. */
dplane_ctx_fini(&ctx);
- thread_add_timer(zrouter.master, fpm_rib_send,
- fnc, 1, &fnc->t_ribwalk);
+ event_add_timer(zrouter.master, fpm_rib_send,
+ fnc, 1, &fnc->t_ribwalk);
return;
}
WALK_FINISH(fnc, FNE_RIB_FINISHED);
/* Schedule next event: RMAC reset. */
- thread_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
- &fnc->t_rmacreset);
+ event_add_event(zrouter.master, fpm_rmac_reset, fnc, 0,
+ &fnc->t_rmacreset);
}
/*
zrmac->fwd_info.r_vtep_ip, sticky, 0 /*nhg*/,
0 /*update_flags*/);
if (fpm_nl_enqueue(fra->fnc, fra->ctx) == -1) {
- thread_add_timer(zrouter.master, fpm_rmac_send,
- fra->fnc, 1, &fra->fnc->t_rmacwalk);
+ event_add_timer(zrouter.master, fpm_rmac_send, fra->fnc, 1,
+ &fra->fnc->t_rmacwalk);
fra->complete = false;
}
}
hash_iterate(zrouter.nhgs_id, fpm_nhg_reset_cb, NULL);
/* Schedule next step: send next hop groups. */
- thread_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
+ event_add_event(zrouter.master, fpm_nhg_send, fnc, 0, &fnc->t_nhgwalk);
}
/*
hash_iterate(zvrf->lsp_table, fpm_lsp_reset_cb, NULL);
/* Schedule next step: send LSPs */
- thread_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
+ event_add_event(zrouter.master, fpm_lsp_send, fnc, 0, &fnc->t_lspwalk);
}
/**
}
/* Schedule next step: send RIB routes. */
- thread_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
+ event_add_event(zrouter.master, fpm_rib_send, fnc, 0, &fnc->t_ribwalk);
}
/*
hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL);
/* Schedule next event: send RMAC entries. */
- thread_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
- &fnc->t_rmacwalk);
+ event_add_event(zrouter.master, fpm_rmac_send, fnc, 0,
+ &fnc->t_rmacwalk);
}
static void fpm_process_queue(struct event *t)
/* Re-schedule if we ran out of buffer space */
if (no_bufs)
- thread_add_timer(fnc->fthread->master, fpm_process_queue,
- fnc, 0, &fnc->t_dequeue);
+ event_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0,
+ &fnc->t_dequeue);
/*
* Let the dataplane thread know if there are items in the
if (atomic_load_explicit(&fnc->counters.ctxqueue_len,
memory_order_relaxed)
> 0)
- thread_add_timer(fnc->fthread->master, fpm_process_queue,
- fnc, 0, &fnc->t_dequeue);
+ event_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0,
+ &fnc->t_dequeue);
/* Ensure dataplane thread is rescheduled if we hit the work limit */
if (counter >= limit)
return;
zif->speed_update_count++;
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp,
- SPEED_UPDATE_SLEEP_TIME, &zif->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp,
+ SPEED_UPDATE_SLEEP_TIME, &zif->speed_update);
thread_ignore_late_timer(zif->speed_update);
}
}
* down upon startup.
*/
zebra_if->speed_update_count = 0;
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp, 15,
- &zebra_if->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp, 15,
+ &zebra_if->speed_update);
thread_ignore_late_timer(zebra_if->speed_update);
return 0;
if (zif->flags & ZIF_FLAG_EVPN_MH_UPLINK)
zebra_evpn_mh_uplink_oper_update(zif);
- thread_add_timer(zrouter.master, if_zebra_speed_update, ifp, 0,
- &zif->speed_update);
+ event_add_timer(zrouter.master, if_zebra_speed_update, ifp, 0,
+ &zif->speed_update);
thread_ignore_late_timer(zif->speed_update);
}
timer);
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
static void irdp_if_stop(struct interface *ifp)
return ret;
};
- thread_add_read(zrouter.master, irdp_read_raw, NULL, sock, &t_irdp_raw);
+ event_add_read(zrouter.master, irdp_read_raw, NULL, sock, &t_irdp_raw);
return sock;
}
timer);
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
void irdp_advert_off(struct interface *ifp)
timer = (frr_weak_random() % MAX_RESPONSE_DELAY) + 1;
irdp->t_advertise = NULL;
- thread_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
- &irdp->t_advertise);
+ event_add_timer(zrouter.master, irdp_send_thread, ifp, timer,
+ &irdp->t_advertise);
}
static int irdp_finish(void)
int ret, ifindex = 0;
int irdp_sock = THREAD_FD(r);
- thread_add_read(zrouter.master, irdp_read_raw, NULL, irdp_sock,
- &t_irdp_raw);
+ event_add_read(zrouter.master, irdp_read_raw, NULL, irdp_sock,
+ &t_irdp_raw);
ret = irdp_recvmsg(irdp_sock, (uint8_t *)buf, IRDP_RX_BUF, &ifindex);
netlink_parse_info(netlink_information_fetch, &zns->netlink, &dp_info,
5, false);
- thread_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
- &zns->t_netlink);
+ event_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
+ &zns->t_netlink);
}
/*
zns->t_netlink = NULL;
- thread_add_read(zrouter.master, kernel_read, zns,
- zns->netlink.sock, &zns->t_netlink);
+ event_add_read(zrouter.master, kernel_read, zns, zns->netlink.sock,
+ &zns->t_netlink);
rt_netlink_init();
}
* shortage and is not harmful for consistency of
* reading the routing socket. Ignore it.
*/
- thread_add_read(zrouter.master, kernel_read, NULL, sock,
- NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, sock,
+ NULL);
return;
#else
flog_err(EC_ZEBRA_RECVMSG_OVERRUN,
if (nbytes == 0)
return;
- thread_add_read(zrouter.master, kernel_read, NULL, sock, NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, sock, NULL);
if (IS_ZEBRA_DEBUG_KERNEL)
rtmsg_debug(&buf.r.rtm);
}
/* kernel_read needs rewrite. */
- thread_add_read(zrouter.master, kernel_read, NULL, routing_sock, NULL);
+ event_add_read(zrouter.master, kernel_read, NULL, routing_sock, NULL);
}
/* Exported interface function. This function simply calls
* we have to have route_read() called before.
*/
zrouter.startup_time = monotime(NULL);
- thread_add_timer(zrouter.master, rib_sweep_route, NULL,
- graceful_restart, &zrouter.sweeper);
+ event_add_timer(zrouter.master, rib_sweep_route, NULL, graceful_restart,
+ &zrouter.sweeper);
/* Needed for BSD routing socket. */
pid = getpid();
switch (event) {
case RTADV_START:
- thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
- &rtadv->ra_read);
- thread_add_event(zrouter.master, rtadv_timer, zvrf, 0,
- &rtadv->ra_timer);
+ event_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
+ &rtadv->ra_read);
+ event_add_event(zrouter.master, rtadv_timer, zvrf, 0,
+ &rtadv->ra_timer);
break;
case RTADV_STOP:
THREAD_OFF(rtadv->ra_timer);
THREAD_OFF(rtadv->ra_read);
break;
case RTADV_TIMER:
- thread_add_timer(zrouter.master, rtadv_timer, zvrf, val,
- &rtadv->ra_timer);
+ event_add_timer(zrouter.master, rtadv_timer, zvrf, val,
+ &rtadv->ra_timer);
break;
case RTADV_TIMER_MSEC:
- thread_add_timer_msec(zrouter.master, rtadv_timer, zvrf, val,
- &rtadv->ra_timer);
+ event_add_timer_msec(zrouter.master, rtadv_timer, zvrf, val,
+ &rtadv->ra_timer);
break;
case RTADV_READ:
- thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
- &rtadv->ra_read);
+ event_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
+ &rtadv->ra_read);
break;
default:
break;
kernel_dplane_read(&zi->info);
/* Re-start read task */
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
- zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
}
/*
struct dplane_zns_info *zi = THREAD_ARG(event);
/* Start read task */
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
- zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
/* Send requests */
netlink_request_netconf(zi->info.sock);
* pthread is running, we'll initiate this later on.
*/
if (zdplane_info.dg_master)
- thread_add_event(zdplane_info.dg_master,
- dplane_incoming_request, zi, 0,
- &zi->t_request);
+ event_add_event(zdplane_info.dg_master, dplane_incoming_request,
+ zi, 0, &zi->t_request);
}
#endif /* HAVE_NETLINK */
* available.
*/
if (zdplane_info.dg_run) {
- thread_add_event(zdplane_info.dg_master,
- dplane_thread_loop, NULL, 0,
- &zdplane_info.dg_t_update);
+ event_add_event(zdplane_info.dg_master, dplane_thread_loop,
+ NULL, 0, &zdplane_info.dg_t_update);
}
return AOK;
if (dplane_work_pending()) {
/* Reschedule dplane check on a short timer */
- thread_add_timer_msec(zdplane_info.dg_master,
- dplane_check_shutdown_status,
- NULL, 100,
- &zdplane_info.dg_t_shutdown_check);
+ event_add_timer_msec(zdplane_info.dg_master,
+ dplane_check_shutdown_status, NULL, 100,
+ &zdplane_info.dg_t_shutdown_check);
/* TODO - give up and stop waiting after a short time? */
/* We appear to be done - schedule a final callback event
* for the zebra main pthread.
*/
- thread_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
+ event_add_event(zrouter.master, zebra_finalize, NULL, 0, NULL);
}
}
if (IS_ZEBRA_DEBUG_DPLANE)
zlog_debug("Zebra dataplane fini called");
- thread_add_event(zdplane_info.dg_master,
- dplane_check_shutdown_status, NULL, 0,
- &zdplane_info.dg_t_shutdown_check);
+ event_add_event(zdplane_info.dg_master, dplane_check_shutdown_status,
+ NULL, 0, &zdplane_info.dg_t_shutdown_check);
}
/*
zdplane_info.dg_run = true;
/* Enqueue an initial event for the dataplane pthread */
- thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
- &zdplane_info.dg_t_update);
+ event_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
+ &zdplane_info.dg_t_update);
/* Enqueue requests and reads if necessary */
frr_each (zns_info_list, &zdplane_info.dg_zns_list, zi) {
#if defined(HAVE_NETLINK)
- thread_add_read(zdplane_info.dg_master, dplane_incoming_read,
- zi, zi->info.sock, &zi->t_read);
+ event_add_read(zdplane_info.dg_master, dplane_incoming_read, zi,
+ zi->info.sock, &zi->t_read);
dplane_kernel_info_request(zi);
#endif
}
zvrf->dad_freeze_time);
}
- thread_add_timer(zrouter.master,
- zebra_evpn_dad_mac_auto_recovery_exp,
- mac, zvrf->dad_freeze_time,
- &mac->dad_mac_auto_recovery_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_dad_mac_auto_recovery_exp,
+ mac, zvrf->dad_freeze_time,
+ &mac->dad_mac_auto_recovery_timer);
}
/* In case of local update, do not inform to client (BGPd),
zebra_evpn_zebra_mac_flag_dump(mac, mac_buf,
sizeof(mac_buf)));
}
- thread_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac,
- zmh_info->mac_hold_time, &mac->hold_timer);
+ event_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac,
+ zmh_info->mac_hold_time, &mac->hold_timer);
}
void zebra_evpn_mac_stop_hold_timer(struct zebra_mac *mac)
/* Start the DF delay timer on the local ES */
if (!es->df_delay_timer)
- thread_add_timer(zrouter.master, zebra_evpn_es_df_delay_exp_cb,
- es, ZEBRA_EVPN_MH_DF_DELAY_TIME,
- &es->df_delay_timer);
+ event_add_timer(zrouter.master, zebra_evpn_es_df_delay_exp_cb,
+ es, ZEBRA_EVPN_MH_DF_DELAY_TIME,
+ &es->df_delay_timer);
/* See if the local VTEP can function as DF on the ES */
if (!zebra_evpn_es_run_df_election(es, __func__)) {
zlog_debug(
"startup-delay timer started for %d sec on %s",
zmh_info->startup_delay_time, rc);
- thread_add_timer(zrouter.master,
- zebra_evpn_mh_startup_delay_exp_cb, NULL,
- zmh_info->startup_delay_time,
- &zmh_info->startup_delay_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_mh_startup_delay_exp_cb, NULL,
+ zmh_info->startup_delay_time,
+ &zmh_info->startup_delay_timer);
zebra_evpn_mh_update_protodown(
ZEBRA_PROTODOWN_EVPN_STARTUP_DELAY, true /* set */);
} else {
if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH)
zlog_debug("sync-neigh vni %u ip %pIA mac %pEA 0x%x hold start",
n->zevpn->vni, &n->ip, &n->emac, n->flags);
- thread_add_timer(zrouter.master, zebra_evpn_neigh_hold_exp_cb, n,
- zmh_info->neigh_hold_time, &n->hold_timer);
+ event_add_timer(zrouter.master, zebra_evpn_neigh_hold_exp_cb, n,
+ zmh_info->neigh_hold_time, &n->hold_timer);
}
static void zebra_evpn_local_neigh_deref_mac(struct zebra_neigh *n,
__func__, &nbr->emac, &nbr->ip,
nbr->flags, zvrf->dad_freeze_time);
- thread_add_timer(zrouter.master,
- zebra_evpn_dad_ip_auto_recovery_exp,
- nbr, zvrf->dad_freeze_time,
- &nbr->dad_ip_auto_recovery_timer);
+ event_add_timer(zrouter.master,
+ zebra_evpn_dad_ip_auto_recovery_exp,
+ nbr, zvrf->dad_freeze_time,
+ &nbr->dad_ip_auto_recovery_timer);
}
if (zvrf->dad_freeze)
*is_dup_detect = true;
assert(!zfpm_g->t_read);
assert(zfpm_g->sock >= 0);
- thread_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
- &zfpm_g->t_read);
+ event_add_read(zfpm_g->master, zfpm_read_cb, 0, zfpm_g->sock,
+ &zfpm_g->t_read);
}
/*
assert(!zfpm_g->t_write);
assert(zfpm_g->sock >= 0);
- thread_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
- &zfpm_g->t_write);
+ event_add_write(zfpm_g->master, zfpm_write_cb, 0, zfpm_g->sock,
+ &zfpm_g->t_write);
}
/*
zfpm_g->stats.t_conn_up_yields++;
zfpm_rnodes_iter_pause(iter);
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
- NULL, 0, &zfpm_g->t_conn_up);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb,
+ NULL, 0, &zfpm_g->t_conn_up);
return;
}
zfpm_debug("Starting conn_up thread");
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
- &zfpm_g->t_conn_up);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0,
+ &zfpm_g->t_conn_up);
zfpm_g->stats.t_conn_up_starts++;
}
zfpm_g->stats.t_conn_down_yields++;
zfpm_rnodes_iter_pause(iter);
zfpm_g->t_conn_down = NULL;
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
- NULL, 0, &zfpm_g->t_conn_down);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb,
+ NULL, 0, &zfpm_g->t_conn_down);
return;
}
assert(!zfpm_g->t_conn_down);
zfpm_rnodes_iter_init(&zfpm_g->t_conn_down_state.iter);
zfpm_g->t_conn_down = NULL;
- thread_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
- &zfpm_g->t_conn_down);
+ event_add_timer_msec(zfpm_g->master, zfpm_conn_down_thread_cb, NULL, 0,
+ &zfpm_g->t_conn_down);
zfpm_g->stats.t_conn_down_starts++;
zfpm_set_state(ZFPM_STATE_IDLE, detail);
delay_secs = zfpm_calc_connect_delay();
zfpm_debug("scheduling connect in %ld seconds", delay_secs);
- thread_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
- &zfpm_g->t_connect);
+ event_add_timer(zfpm_g->master, zfpm_connect_cb, 0, delay_secs,
+ &zfpm_g->t_connect);
zfpm_set_state(ZFPM_STATE_ACTIVE, reason);
}
{
assert(!zfpm_g->t_stats);
- thread_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
- ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
+ event_add_timer(zfpm_g->master, zfpm_stats_timer_cb, 0,
+ ZFPM_STATS_IVL_SECS, &zfpm_g->t_stats);
}
/*
&& (info->t_stale_removal == NULL)) {
struct vrf *vrf = vrf_lookup_by_id(info->vrf_id);
- thread_add_timer(
+ event_add_timer(
zrouter.master,
zebra_gr_route_stale_delete_timer_expiry, info,
info->stale_removal_time,
__func__, zebra_route_string(client->proto),
VRF_LOGNAME(vrf), info->vrf_id, cnt);
- thread_add_timer(zrouter.master,
- zebra_gr_route_stale_delete_timer_expiry, info,
- ZEBRA_DEFAULT_STALE_UPDATE_DELAY,
- &info->t_stale_removal);
+ event_add_timer(zrouter.master,
+ zebra_gr_route_stale_delete_timer_expiry, info,
+ ZEBRA_DEFAULT_STALE_UPDATE_DELAY,
+ &info->t_stale_removal);
} else {
/* No routes to delete for the VRF */
LOG_GR("%s: Client %s vrf %s(%u) all stale routes processed",
* additional four bytes are for message type
*/
stream_putl_at(s, 0, msg_type);
- thread_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
- s, 0, NULL);
+ event_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
+ s, 0, NULL);
}
/**********************End of MLAG Interaction********************************/
* main thread.
*/
if (msg_type == MLAG_DEREGISTER) {
- thread_add_event(zrouter.master,
- zebra_mlag_terminate_pthread,
- NULL, 0, NULL);
+ event_add_event(zrouter.master,
+ zebra_mlag_terminate_pthread,
+ NULL, 0, NULL);
}
}
* during Zebra Init/after MLAG thread is destroyed.
* so it is safe to use without any locking
*/
- thread_add_event(zrouter.mlag_info.th_master,
- zebra_mlag_client_msg_handler, NULL, 0,
- &zrouter.mlag_info.t_write);
+ event_add_event(zrouter.mlag_info.th_master,
+ zebra_mlag_client_msg_handler, NULL, 0,
+ &zrouter.mlag_info.t_write);
return 0;
}
s = stream_new(ZEBRA_HEADER_SIZE + ZEBRA_MLAG_METADATA_LEN);
stream_putl(s, ZEBRA_MLAG_MSG_BCAST);
zclient_create_header(s, msg_type, VRF_DEFAULT);
- thread_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
- s, 0, NULL);
+ event_add_event(zrouter.master, zebra_mlag_post_data_from_main_thread,
+ s, 0, NULL);
}
/**************************End of Multi-entrant Apis**************************/
static void zebra_mlag_sched_read(void)
{
- thread_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
- &zrouter.mlag_info.t_read);
+ event_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
+ &zrouter.mlag_info.t_read);
}
static void zebra_mlag_read(struct event *thread)
svr.sun_path);
close(mlag_socket);
zrouter.mlag_info.timer_running = true;
- thread_add_timer(zmlag_master, zebra_mlag_connect, NULL, 10,
- &zrouter.mlag_info.t_read);
+ event_add_timer(zmlag_master, zebra_mlag_connect, NULL, 10,
+ &zrouter.mlag_info.t_read);
return;
}
zlog_debug("%s: Connection with MLAG is established ",
__func__);
- thread_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
- &zrouter.mlag_info.t_read);
+ event_add_read(zmlag_master, zebra_mlag_read, NULL, mlag_socket,
+ &zrouter.mlag_info.t_read);
/*
* Connection is established with MLAGD, post to clients
*/
*/
static int zebra_mlag_private_monitor_state(void)
{
- thread_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
- &zrouter.mlag_info.t_read);
+ event_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
+ &zrouter.mlag_info.t_read);
return 0;
}
/*
* Connect only if any clients are showing interest
*/
- thread_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
- &zrouter.mlag_info.t_read);
+ event_add_event(zmlag_master, zebra_mlag_connect, NULL, 0,
+ &zrouter.mlag_info.t_read);
}
return 0;
}
XFREE(MTYPE_NETNS_MISC, zns_info);
return 0;
}
- thread_add_timer_msec(zrouter.master, zebra_ns_ready_read,
- (void *)zns_info, ZEBRA_NS_POLLING_INTERVAL_MSEC,
- NULL);
+ event_add_timer_msec(zrouter.master, zebra_ns_ready_read,
+ (void *)zns_info, ZEBRA_NS_POLLING_INTERVAL_MSEC,
+ NULL);
return 0;
}
ssize_t len;
char event_name[NAME_MAX + 1];
- thread_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
- &zebra_netns_notify_current);
+ event_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
+ &zebra_netns_notify_current);
len = read(fd_monitor, buf, sizeof(buf));
if (len < 0) {
flog_err_sys(EC_ZEBRA_NS_NOTIFY_READ,
sizeof(struct zebra_netns_info));
netnsinfo->retries = ZEBRA_NS_POLLING_MAX_RETRIES;
netnsinfo->netnspath = netnspath;
- thread_add_timer_msec(zrouter.master, zebra_ns_ready_read,
- (void *)netnsinfo, 0, NULL);
+ event_add_timer_msec(zrouter.master, zebra_ns_ready_read,
+ (void *)netnsinfo, 0, NULL);
}
}
"NS notify watch: failed to add watch (%s)",
safe_strerror(errno));
}
- thread_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
- &zebra_netns_notify_current);
+ event_add_read(zrouter.master, zebra_ns_notify_read, NULL, fd_monitor,
+ &zebra_netns_notify_current);
}
void zebra_ns_notify_close(void)
!CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND)) {
nhe->refcnt = 1;
SET_FLAG(nhe->flags, NEXTHOP_GROUP_KEEP_AROUND);
- thread_add_timer(zrouter.master, zebra_nhg_timer, nhe,
- zrouter.nhg_keep, &nhe->timer);
+ event_add_timer(zrouter.master, zebra_nhg_timer, nhe,
+ zrouter.nhg_keep, &nhe->timer);
return;
}
atomic_store_explicit(&zo_info.run, 1, memory_order_relaxed);
/* Enqueue an initial event for the pthread */
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
/* And start the pthread */
frr_pthread_run(zo_info.pthread, NULL);
if (IS_ZEBRA_DEBUG_RECV && IS_ZEBRA_DEBUG_DETAIL)
zlog_debug("%s: received %u messages",
__func__, counter);
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
}
return counter;
if (need_resched) {
atomic_fetch_add_explicit(&zo_info.yields, 1,
memory_order_relaxed);
- thread_add_event(zo_info.master, process_messages, NULL, 0,
- &zo_info.t_msgs);
+ event_add_event(zo_info.master, process_messages, NULL, 0,
+ &zo_info.t_msgs);
}
/* This will also free any leftover messages, in the shutdown case */
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
case BUFFER_PENDING:
ptm_cb.t_write = NULL;
- thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_write);
+ event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_write);
break;
case BUFFER_EMPTY:
break;
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return -1;
case BUFFER_EMPTY:
THREAD_OFF(ptm_cb.t_write);
break;
case BUFFER_PENDING:
- thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_write);
+ event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_write);
break;
}
if (ptm_cb.ptm_sock != -1) {
if (init) {
ptm_cb.t_read = NULL;
- thread_add_read(zrouter.master, zebra_ptm_sock_read,
- NULL, ptm_cb.ptm_sock, &ptm_cb.t_read);
+ event_add_read(zrouter.master, zebra_ptm_sock_read,
+ NULL, ptm_cb.ptm_sock, &ptm_cb.t_read);
zebra_bfd_peer_replay_req();
}
zebra_ptm_send_status_req();
ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_MAX;
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
} else if (ptm_cb.reconnect_time >= ZEBRA_PTM_RECONNECT_TIME_MAX) {
ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;
}
ptm_cb.ptm_sock = -1;
zebra_ptm_reset_status(0);
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time,
- &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
ptm_cb.t_read = NULL;
- thread_add_read(zrouter.master, zebra_ptm_sock_read, NULL,
- ptm_cb.ptm_sock, &ptm_cb.t_read);
+ event_add_read(zrouter.master, zebra_ptm_sock_read, NULL,
+ ptm_cb.ptm_sock, &ptm_cb.t_read);
}
/* BFD peer/dst register/update */
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return;
}
if (ptm_cb.ptm_sock == -1) {
ptm_cb.t_timer = NULL;
- thread_add_timer(zrouter.master, zebra_ptm_connect, NULL,
- ptm_cb.reconnect_time, &ptm_cb.t_timer);
+ event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
+ ptm_cb.reconnect_time, &ptm_cb.t_timer);
return 0;
}
/* schedule to retry later */
THREAD_OFF(pw->install_retry_timer);
- thread_add_timer(zrouter.master, zebra_pw_install_retry, pw,
- PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
+ event_add_timer(zrouter.master, zebra_pw_install_retry, pw,
+ PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
zebra_pw_update_status(pw, pwstatus);
}
ctx = rib_update_ctx_init(0, event);
- thread_add_event(zrouter.master, rib_update_handler, ctx, 0,
- &t_rib_update_threads[event]);
+ event_add_event(zrouter.master, rib_update_handler, ctx, 0,
+ &t_rib_update_threads[event]);
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug("%s: Scheduled VRF (ALL), event %s", __func__,
}
/* Ensure event is signalled to zebra main pthread */
- thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
- &t_dplane);
+ event_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
+ &t_dplane);
return 0;
}
if (zebra_rmap_update_timer)
THREAD_OFF(zebra_t_rmap_update);
- thread_add_timer(zrouter.master, zebra_route_map_update_timer,
- NULL, zebra_rmap_update_timer, &zebra_t_rmap_update);
+ event_add_timer(zrouter.master, zebra_route_map_update_timer, NULL,
+ zebra_rmap_update_timer, &zebra_t_rmap_update);
}
static void zebra_route_map_add(const char *rmap_name)
/*
* Zebra server event driver for all client threads.
*
- * This is essentially a wrapper around thread_add_event() that centralizes
+ * This is essentially a wrapper around event_add_event() that centralizes
* those scheduling calls into one place.
*
* All calls to this function schedule an event on the pthread running the
/*
* Zebra server event driver for the main thread.
*
- * This is essentially a wrapper around thread_add_event() that centralizes
+ * This is essentially a wrapper around event_add_event() that centralizes
* those scheduling calls into one place.
*
* All calls to this function schedule an event on Zebra's main pthread.
{
switch (event) {
case ZSERV_CLIENT_READ:
- thread_add_read(client->pthread->master, zserv_read, client,
- client->sock, &client->t_read);
+ event_add_read(client->pthread->master, zserv_read, client,
+ client->sock, &client->t_read);
break;
case ZSERV_CLIENT_WRITE:
- thread_add_write(client->pthread->master, zserv_write, client,
- client->sock, &client->t_write);
+ event_add_write(client->pthread->master, zserv_write, client,
+ client->sock, &client->t_write);
break;
}
}
* main pthread.
*/
if (client->is_closed)
- thread_add_event(zrouter.master,
- zserv_handle_client_fail,
- client, 0, &client->t_cleanup);
+ event_add_event(zrouter.master,
+ zserv_handle_client_fail,
+ client, 0, &client->t_cleanup);
}
}
{
switch (event) {
case ZSERV_ACCEPT:
- thread_add_read(zrouter.master, zserv_accept, NULL, zsock,
- NULL);
+ event_add_read(zrouter.master, zserv_accept, NULL, zsock, NULL);
break;
case ZSERV_PROCESS_MESSAGES:
- thread_add_event(zrouter.master, zserv_process_messages, client,
- 0, &client->t_process);
+ event_add_event(zrouter.master, zserv_process_messages, client,
+ 0, &client->t_process);
break;
case ZSERV_HANDLE_CLIENT_FAIL:
- thread_add_event(zrouter.master, zserv_handle_client_fail,
- client, 0, &client->t_cleanup);
+ event_add_event(zrouter.master, zserv_handle_client_fail,
+ client, 0, &client->t_cleanup);
}
}