babel_interface_close_all();
/* cancel events */
- thread_cancel(&babel_routing_process->t_read);
- thread_cancel(&babel_routing_process->t_update);
+ event_cancel(&babel_routing_process->t_read);
+ event_cancel(&babel_routing_process->t_update);
distribute_list_delete(&babel_routing_process->distribute_ctx);
XFREE(MTYPE_BABEL, babel_routing_process);
babel_set_timer(struct timeval *timeout)
{
long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
- thread_cancel(&(babel_routing_process->t_update));
+ event_cancel(&(babel_routing_process->t_update));
event_add_timer_msec(master, babel_main_loop, NULL, msecs,
&babel_routing_process->t_update);
}
{
struct bfd_control_socket *bcs;
- thread_cancel(&bglobal.bg_csockev);
+ event_cancel(&bglobal.bg_csockev);
socket_close(&bglobal.bg_csock);
struct bfd_control_queue *bcq;
struct bfd_notify_peer *bnp;
- thread_cancel(&(bcs->bcs_ev));
- thread_cancel(&(bcs->bcs_outev));
+ event_cancel(&(bcs->bcs_ev));
+ event_cancel(&(bcs->bcs_outev));
close(bcs->bcs_sd);
return 1;
empty_list:
- thread_cancel(&(bcs->bcs_outev));
+ event_cancel(&(bcs->bcs_outev));
bcs->bcs_bout = NULL;
return 0;
}
#define BGP_EVENT_FLUSH(P) \
do { \
assert((P)); \
- thread_cancel_event_ready(bm->master, (P)); \
+ event_cancel_event_ready(bm->master, (P)); \
} while (0)
#define BGP_UPDATE_GROUP_TIMER_ON(T, F) \
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
- thread_cancel_async(fpt->master, &peer->t_write, NULL);
+ event_cancel_async(fpt->master, &peer->t_write, NULL);
THREAD_OFF(peer->t_generate_updgrp_packets);
UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
struct frr_pthread *fpt = bgp_pth_io;
assert(fpt->running);
- thread_cancel_async(fpt->master, &peer->t_read, NULL);
+ event_cancel_async(fpt->master, &peer->t_read, NULL);
THREAD_OFF(peer->t_process_packet);
THREAD_OFF(peer->t_process_packet_error);
}
if (tcb->event_thread)
- thread_cancel(&tcb->event_thread);
+ event_cancel(&tcb->event_thread);
lpt_inprogress = false;
}
}
if (tcb->event_thread)
- thread_cancel(&tcb->event_thread);
+ event_cancel(&tcb->event_thread);
memset(tcb, 0, sizeof(*tcb));
bgp_timer_set(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
- thread_cancel_event_ready(bm->master, peer);
+ event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(peer->t_revalidate_all[afi][safi]);
assert(!peer->t_write);
bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
- thread_cancel_event_ready(bm->master, peer);
+ event_cancel_event_ready(bm->master, peer);
FOREACH_AFI_SAFI (afi, safi)
THREAD_OFF(peer->t_revalidate_all[afi][safi]);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
- ``task`` is ``struct event *``
- ``fetch`` is ``thread_fetch()``
- ``exec()`` is ``thread_call()``
-- ``cancel()`` is ``thread_cancel()``
+- ``cancel()`` is ``event_cancel()``
- ``schedule()`` is any of the various task-specific ``event_add_*`` functions
Adding tasks is done with various task-specific function-like macros. These
communication and boils down to a slightly more complicated method of message
passing, where the messages are the regular task events as used in the
event-driven model. The only difference is thread cancellation, which requires
-calling ``thread_cancel_async()`` instead of ``thread_cancel`` to cancel a task
+calling ``event_cancel_async()`` instead of ``event_cancel`` to cancel a task
currently scheduled on a ``threadmaster`` belonging to a different pthread.
This is necessary to avoid race conditions in the specific case where one
pthread wants to guarantee that a task on another pthread is cancelled before
proceeding.
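As a rough sketch of the distinction (``t_example`` and ``other_master`` are
hypothetical names; ``other_master`` is owned by a different pthread)::

   /* Task lives on our own threadmaster: cancel it directly. */
   event_cancel(&t_example);

   /* Task lives on another pthread's threadmaster: request an
    * asynchronous cancellation and let the owning pthread service it;
    * the call returns once the cancellation has completed. */
   event_cancel_async(other_master, &t_example, NULL);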
frr_libfrr:frr_pthread_stop (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:frr_pthread_run (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
frr_libfrr:thread_call (loglevel: TRACE_INFO (6)) (type: tracepoint)
- frr_libfrr:thread_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
- frr_libfrr:thread_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
+ frr_libfrr:event_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
+ frr_libfrr:event_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_write (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_read (loglevel: TRACE_INFO (6)) (type: tracepoint)
frr_libfrr:schedule_event (loglevel: TRACE_INFO (6)) (type: tracepoint)
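Existing tracing sessions need to enable the renamed tracepoints; with the
standard LTTng CLI this looks roughly as follows (session name hypothetical):

    lttng create frr-tracing
    lttng enable-event --userspace frr_libfrr:event_cancel
    lttng enable-event --userspace frr_libfrr:event_cancel_async
    lttng start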
// TODO: check Graceful restart after 10sec
/* cancel GR scheduled */
- thread_cancel(&(e->t_distribute));
+ event_cancel(&(e->t_distribute));
/* schedule Graceful restart for whole process in 10sec */
event_add_timer(master, eigrp_distribute_timer_process, e, (10),
// TODO: check Graceful restart after 10sec
/* Cancel GR scheduled */
- thread_cancel(&(ei->t_distribute));
+ event_cancel(&(ei->t_distribute));
/* schedule Graceful restart for interface in 10sec */
event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
&ei->t_distribute);
if (ei->on_write_q) {
listnode_delete(eigrp->oi_write_q, ei);
if (list_isempty(eigrp->oi_write_q))
- thread_cancel(&(eigrp->t_write));
+ event_cancel(&(eigrp->t_write));
ei->on_write_q = 0;
}
}
struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
- thread_cancel(&ei->t_hello);
+ event_cancel(&ei->t_hello);
eigrp_hello_send(ei, EIGRP_HELLO_GRACEFUL_SHUTDOWN, NULL);
}
eigrp_topology_neighbor_down(nbr->ei->eigrp, nbr);
/* Cancel all events; thread lookup cost would be negligible. */
- thread_cancel_event(master, nbr);
+ event_cancel_event(master, nbr);
eigrp_fifo_free(nbr->multicast_queue);
eigrp_fifo_free(nbr->retrans_queue);
THREAD_OFF(nbr->t_holddown);
circuit->snd_stream = NULL;
}
- thread_cancel_event(master, circuit);
+ event_cancel_event(master, circuit);
return;
}
THREAD_OFF(area->t_lsp_refresh[1]);
THREAD_OFF(area->t_rlfa_rib_update);
- thread_cancel_event(master, area);
+ event_cancel_event(master, area);
listnode_delete(area->isis->area_list, area);
struct event **thr;
int fd, thr_fd;
- thread_cancel(&timeout_thr);
+ event_cancel(&timeout_thr);
FD_ZERO(&fds);
snmp_select_info(&maxfd, &fds, &timeout, &block);
if (thr_fd == fd) {
struct listnode *nextln = listnextnode(ln);
if (!FD_ISSET(fd, &fds)) {
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
}
while (ln) {
struct listnode *nextln = listnextnode(ln);
thr = listgetdata(ln);
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
ln = nextln;
}
/* Flags for task cancellation */
-#define THREAD_CANCEL_FLAG_READY 0x01
+#define EVENT_CANCEL_FLAG_READY 0x01
static int thread_timer_cmp(const struct event *a, const struct event *b)
{
* - POLLIN
* - POLLOUT
*/
-static void thread_cancel_rw(struct thread_master *master, int fd, short state,
- int idx_hint)
+static void event_cancel_rw(struct thread_master *master, int fd, short state,
+ int idx_hint)
{
bool found = false;
}
/* If requested, stop here and ignore io and timers */
- if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
+ if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
return;
/* Check the io tasks */
fd = pfd->fd;
/* Found a match to cancel: clean up fd arrays */
- thread_cancel_rw(master, pfd->fd, pfd->events, i);
+ event_cancel_rw(master, pfd->fd, pfd->events, i);
/* Clean up thread arrays */
master->read[fd] = NULL;
* @param master the thread master to process
* @REQUIRE master->mtx
*/
-static void do_thread_cancel(struct thread_master *master)
+static void do_event_cancel(struct thread_master *master)
{
struct thread_list_head *list = NULL;
struct event **thread_array = NULL;
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
- thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
+ event_cancel_rw(master, thread->u.fd, POLLIN, -1);
thread_array = master->read;
break;
case THREAD_WRITE:
- thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
+ event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
thread_array = master->write;
break;
case THREAD_TIMER:
if (master->cancel_req)
list_delete_all_node(master->cancel_req);
- /* Wake up any threads which may be blocked in thread_cancel_async() */
+ /* Wake up any threads which may be blocked in event_cancel_async() */
master->canceled = true;
pthread_cond_broadcast(&master->cancel_cond);
}
frr_with_mutex (&m->mtx) {
cr->eventobj = arg;
listnode_add(m->cancel_req, cr);
- do_thread_cancel(m);
+ do_event_cancel(m);
}
}
* @param master the thread_master to cancel from
* @param arg the argument passed when creating the event
*/
-void thread_cancel_event(struct thread_master *master, void *arg)
+void event_cancel_event(struct thread_master *master, void *arg)
{
cancel_event_helper(master, arg, 0);
}
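A hedged sketch of the teardown pattern this enables, where every pending
task keyed on one object is swept in a single call (``nbr`` and its fields
are illustrative):

    /* Several tasks scheduled with the same arg... */
    event_add_timer(master, hello_expire, nbr, 5, &nbr->t_hello);
    event_add_event(master, nbr_process, nbr, 0, &nbr->t_process);

    /* ...are all cancelled at once when the object goes away. */
    event_cancel_event(master, nbr);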
* @param m the thread_master to cancel from
* @param arg the argument passed when creating the event
*/
-void thread_cancel_event_ready(struct thread_master *m, void *arg)
+void event_cancel_event_ready(struct thread_master *m, void *arg)
{
/* Only cancel ready/event tasks */
- cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
+ cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
}
/**
*
* @param thread task to cancel
*/
-void thread_cancel(struct event **thread)
+void event_cancel(struct event **thread)
{
struct thread_master *master;
master = (*thread)->master;
- frrtrace(9, frr_libfrr, thread_cancel, master,
- (*thread)->xref->funcname, (*thread)->xref->xref.file,
- (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
- (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
+ frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
+ (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
+ (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
+ (*thread)->u.sands.tv_sec);
assert(master->owner == pthread_self());
struct cancel_req *cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = *thread;
listnode_add(master->cancel_req, cr);
- do_thread_cancel(master);
+ do_event_cancel(master);
}
*thread = NULL;
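Because the caller's reference is nulled on return, cancelling twice through
the same pointer is safe; a minimal sketch (``t_cleanup``, ``cleanup_expire``
and ``obj`` are illustrative, and this assumes the NULL guard elided from the
excerpt above):

    struct event *t_cleanup = NULL;

    event_add_timer(master, cleanup_expire, obj, 30, &t_cleanup);

    event_cancel(&t_cleanup); /* cancels the task, sets t_cleanup = NULL */
    event_cancel(&t_cleanup); /* harmless: *thread is already NULL */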
* @param thread pointer to thread to cancel
* @param eventobj the event
*/
-void thread_cancel_async(struct thread_master *master, struct event **thread,
- void *eventobj)
+void event_cancel_async(struct thread_master *master, struct event **thread,
+ void *eventobj)
{
assert(!(thread && eventobj) && (thread || eventobj));
if (thread && *thread)
- frrtrace(9, frr_libfrr, thread_cancel_async, master,
+ frrtrace(9, frr_libfrr, event_cancel_async, master,
(*thread)->xref->funcname, (*thread)->xref->xref.file,
(*thread)->xref->xref.line, NULL, (*thread)->u.fd,
(*thread)->u.val, (*thread)->arg,
(*thread)->u.sands.tv_sec);
else
- frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
+ frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
0, NULL, 0, 0, eventobj, 0);
assert(master->owner != pthread_self());
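The assertion admits exactly one of the two selectors, giving two calling
modes (sketch; ``obj`` and ``t_work`` are illustrative):

    /* Cancel one specific task via its reference... */
    event_cancel_async(other_master, &obj->t_work, NULL);

    /* ...or cancel every task whose arg matches an object. */
    event_cancel_async(other_master, NULL, obj);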
ready++;
/*
- * Unless someone has called thread_cancel from another
+ * Unless someone has called event_cancel from another
* pthread, the only thing that could have changed in
* m->handler.pfds while we were asleep is the .events
- * field in a given pollfd. Barring thread_cancel() that
+ * field in a given pollfd. Barring event_cancel() that
* value should be a superset of the values we have in our
* copy, so there's no need to update it. Similarly,
* barring deletion, the fd should still be a valid index
pthread_mutex_lock(&m->mtx);
/* Process any pending cancellation requests */
- do_thread_cancel(m);
+ do_event_cancel(m);
/*
* Attempt to flush ready queue before going into poll().
/*
* Please consider this macro deprecated, and do not use it in new code.
*/
-#define THREAD_OFF(thread) \
- do { \
- if ((thread)) \
- thread_cancel(&(thread)); \
+#define THREAD_OFF(thread) \
+ do { \
+ if ((thread)) \
+ event_cancel(&(thread)); \
} while (0)
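Since ``event_cancel()`` itself returns early on a NULL reference (assuming
the guard elided from its definition above), the macro's extra check is
redundant and new code can call the function directly:

    /* legacy spelling */
    THREAD_OFF(peer->t_holdtime);

    /* equivalent direct call, preferred in new code */
    event_cancel(&peer->t_holdtime);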
/*
struct thread_master *master,
void (*fn)(struct event *), void *arg, int val);
-extern void thread_cancel(struct event **event);
-extern void thread_cancel_async(struct thread_master *, struct event **,
- void *);
+extern void event_cancel(struct event **event);
+extern void event_cancel_async(struct thread_master *, struct event **, void *);
/* Cancel ready tasks with an arg matching 'arg' */
-extern void thread_cancel_event_ready(struct thread_master *m, void *arg);
+extern void event_cancel_event_ready(struct thread_master *m, void *arg);
/* Cancel all tasks with an arg matching 'arg', including timers and io */
-extern void thread_cancel_event(struct thread_master *m, void *arg);
+extern void event_cancel_event(struct thread_master *m, void *arg);
extern struct event *thread_fetch(struct thread_master *, struct event *event);
extern void thread_call(struct event *event);
extern unsigned long thread_timer_remain_second(struct event *event);
cb->in_cb = false;
if (events & ZMQ_POLLIN) {
- thread_cancel(&cb->read.thread);
+ event_cancel(&cb->read.thread);
event_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
cb->in_cb = false;
if (events & ZMQ_POLLOUT) {
- thread_cancel(&cb->write.thread);
+ event_cancel(&cb->write.thread);
_event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
if (!cb || !*cb)
return;
core->cancelled = true;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
/* If cancelled from within a callback, don't try to free memory
* in this path.
if ((events & event) && core->thread && !core->cancelled) {
struct thread_master *tm = core->thread->master;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
if (event == ZMQ_POLLIN)
event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_event)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_read)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_write)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel_async)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel_async)
THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_call)
TRACEPOINT_EVENT(
void nb_cli_confirmed_commit_clean(struct vty *vty)
{
- thread_cancel(&vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
nb_config_free(vty->confirmed_commit_rollback);
vty->confirmed_commit_rollback = NULL;
}
"%% Resetting confirmed-commit timeout to %u minute(s)\n\n",
confirmed_timeout);
- thread_cancel(&vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
event_add_timer(master, nb_cli_confirmed_commit_timeout,
vty, confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
if (!backoff)
return;
- thread_cancel(&backoff->t_holddown);
- thread_cancel(&backoff->t_timetolearn);
+ event_cancel(&backoff->t_holddown);
+ event_cancel(&backoff->t_timetolearn);
XFREE(MTYPE_SPF_BACKOFF_NAME, backoff->name);
XFREE(MTYPE_SPF_BACKOFF, backoff);
break;
case SPF_BACKOFF_SHORT_WAIT:
case SPF_BACKOFF_LONG_WAIT:
- thread_cancel(&backoff->t_holddown);
+ event_cancel(&backoff->t_holddown);
event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
{
int fd = -1;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
if (zcf->prio_min != ZLOG_DISABLED)
fd = zlog_5424_open(zcf, -1);
if (!zcf->active)
return true;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
/* need to retain the socket type because it also influences
* other fields (packets) and we can't atomically swap these
void netlink_set_nflog_group(int nlgroup)
{
if (netlink_log_fd >= 0) {
- thread_cancel(&netlink_log_thread);
+ event_cancel(&netlink_log_thread);
close(netlink_log_fd);
netlink_log_fd = -1;
}
if (!oi->type_cfg)
oi->type = ospf6_default_iftype(oi->interface);
- thread_cancel(&oi->thread_sso);
+ event_cancel(&oi->thread_sso);
if (IS_OSPF6_DEBUG_INTERFACE)
zlog_debug("Interface Event %s: [InterfaceUp]",
if (oi->on_write_q) {
listnode_delete(ospf6->oi_write_q, oi);
if (list_isempty(ospf6->oi_write_q))
- thread_cancel(&ospf6->t_write);
+ event_cancel(&ospf6->t_write);
oi->on_write_q = 0;
}
* from VRF and make it "down".
*/
ospf6_vrf_unlink(ospf6, vrf);
- thread_cancel(&ospf6->t_ospf6_receive);
+ event_cancel(&ospf6->t_ospf6_receive);
close(ospf6->fd);
ospf6->fd = -1;
}
listnode_delete(oi->ospf->oiflist, oi);
listnode_delete(oi->area->oiflist, oi);
- thread_cancel_event(master, oi);
+ event_cancel_event(master, oi);
memset(oi, 0, sizeof(*oi));
XFREE(MTYPE_OSPF_IF, oi);
THREAD_OFF(nbr->t_ls_upd);
/* Cancel all events; thread lookup cost would be negligible. */
- thread_cancel_event(master, nbr);
+ event_cancel_event(master, nbr);
bfd_sess_free(&nbr->bfd_session);
}
if ((*thread)->master->owner == pthread_self()) {
- thread_cancel(thread);
+ event_cancel(thread);
} else {
- thread_cancel_async((*thread)->master, thread, NULL);
+ event_cancel_async((*thread)->master, thread, NULL);
}
}
}
if (pcc_state->t_reconnect != NULL) {
- thread_cancel(&pcc_state->t_reconnect);
+ event_cancel(&pcc_state->t_reconnect);
pcc_state->t_reconnect = NULL;
}
if (pcc_state->t_update_best != NULL) {
- thread_cancel(&pcc_state->t_update_best);
+ event_cancel(&pcc_state->t_update_best);
pcc_state->t_update_best = NULL;
}
if (pcc_state->t_session_timeout != NULL) {
- thread_cancel(&pcc_state->t_session_timeout);
+ event_cancel(&pcc_state->t_session_timeout);
pcc_state->t_session_timeout = NULL;
}
assert(pcc_state->sess == NULL);
if (pcc_state->t_reconnect != NULL) {
- thread_cancel(&pcc_state->t_reconnect);
+ event_cancel(&pcc_state->t_reconnect);
pcc_state->t_reconnect = NULL;
}
// In case some best PCE alternatives were waiting to activate
if (pcc_state->t_update_best != NULL) {
- thread_cancel(&pcc_state->t_update_best);
+ event_cancel(&pcc_state->t_update_best);
pcc_state->t_update_best = NULL;
}
void path_ted_timer_sync_cancel(void)
{
if (ted_state_g.t_link_state_sync != NULL) {
- thread_cancel(&ted_state_g.t_link_state_sync);
+ event_cancel(&ted_state_g.t_link_state_sync);
ted_state_g.t_link_state_sync = NULL;
}
}
void path_ted_timer_refresh_cancel(void)
{
if (ted_state_g.t_segment_list_refresh != NULL) {
- thread_cancel(&ted_state_g.t_segment_list_refresh);
+ event_cancel(&ted_state_g.t_segment_list_refresh);
ted_state_g.t_segment_list_refresh = NULL;
}
}
/* The hook needs to be called synchronously, otherwise the candidate
path will be already deleted when the handler is called */
if (candidate->hook_timer != NULL) {
- thread_cancel(&candidate->hook_timer);
+ event_cancel(&candidate->hook_timer);
candidate->hook_timer = NULL;
}
hook_call(pathd_candidate_removed, candidate);
event_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd, \
&mp->t_write)
-#define PIM_MSDP_PEER_READ_OFF(mp) thread_cancel(&mp->t_read)
-#define PIM_MSDP_PEER_WRITE_OFF(mp) thread_cancel(&mp->t_write)
+#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
+#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
#if PIM_IPV != 6
// struct pim_msdp *msdp;
continue;
XFREE(MTYPE_TMP, timers[index]->arg);
- thread_cancel(&timers[index]);
+ event_cancel(&timers[index]);
timers_pending--;
}
event_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
}
for (i = 0; i < SCHEDULE_TIMERS; i++)
- thread_cancel(&timers[i]);
+ event_cancel(&timers[i]);
monotime(&tv_start);
int index;
index = prng_rand(prng) % SCHEDULE_TIMERS;
- thread_cancel(&timers[index]);
+ event_cancel(&timers[index]);
}
monotime(&tv_stop);
what = restart->what;
restart->pid = 0;
gs.numpids--;
- thread_cancel(&restart->t_kill);
+ event_cancel(&restart->t_kill);
/* Update restart time to reflect the time the command
* completed. */
dmn->name, (long)delay.tv_sec, (long)delay.tv_usec);
SET_READ_HANDLER(dmn);
- thread_cancel(&dmn->t_wakeup);
+ event_cancel(&dmn->t_wakeup);
SET_WAKEUP_ECHO(dmn);
}
static void set_phase(enum restart_phase new_phase)
{
gs.phase = new_phase;
- thread_cancel(&gs.t_phase_hanging);
+ event_cancel(&gs.t_phase_hanging);
event_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
&gs.t_phase_hanging);
static void fpm_reconnect(struct fpm_nl_ctx *fnc)
{
/* Cancel all zebra threads first. */
- thread_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
- thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
+ event_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
/*
* Grab the lock to empty the streams (data plane might try to
THREAD_OFF(fnc->t_rmacwalk);
THREAD_OFF(fnc->t_event);
THREAD_OFF(fnc->t_nhg);
- thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
- thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
- thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
+ event_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
+ event_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
+ event_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
if (fnc->socket != -1) {
close(fnc->socket);
/* Stop any outstanding tasks */
if (zdplane_info.dg_master) {
- thread_cancel_async(zdplane_info.dg_master,
- &zi->t_request, NULL);
+ event_cancel_async(zdplane_info.dg_master,
+ &zi->t_request, NULL);
- thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
- NULL);
+ event_cancel_async(zdplane_info.dg_master, &zi->t_read,
+ NULL);
}
XFREE(MTYPE_DP_NS, zi);
zdplane_info.dg_run = false;
if (zdplane_info.dg_t_update)
- thread_cancel_async(zdplane_info.dg_t_update->master,
- &zdplane_info.dg_t_update, NULL);
+ event_cancel_async(zdplane_info.dg_t_update->master,
+ &zdplane_info.dg_t_update, NULL);
frr_pthread_stop(zdplane_info.dg_pthread, NULL);
zlog_debug("Closing client '%s'",
zebra_route_string(client->proto));
- thread_cancel_event(zrouter.master, client);
+ event_cancel_event(zrouter.master, client);
THREAD_OFF(client->t_cleanup);
THREAD_OFF(client->t_process);