git.proxmox.com Git - mirror_frr.git/commitdiff
*: Convert thread_cancelXXX to event_cancelXXX
author Donald Sharp <sharpd@nvidia.com>
Sat, 10 Dec 2022 14:08:37 +0000 (09:08 -0500)
committer Donald Sharp <sharpd@nvidia.com>
Fri, 24 Mar 2023 12:32:17 +0000 (08:32 -0400)
Modify the code base so that thread_cancel becomes event_cancel

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
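
The rename is mechanical: every cancellation entry point keeps its signature and only swaps the thread_ prefix for event_, as the hunks below show. For orientation, a minimal sketch of the new spellings (not taken from this commit; the include path and the master/task/arg names are placeholders, and the calls are assumed to run on the pthread that owns 'master'):

    #include "event.h"   /* assumed include; the declarations live in lib/event.h */

    /* Sketch only.  Cross-pthread cancellation uses event_cancel_async()
     * instead of these calls. */
    static void cancel_examples(struct thread_master *master,
                                struct event **task, void *arg)
    {
            /* was thread_cancel(): cancel one scheduled task */
            event_cancel(task);

            /* was thread_cancel_event_ready(): ready/event tasks whose arg matches */
            event_cancel_event_ready(master, arg);

            /* was thread_cancel_event(): all tasks whose arg matches, timers and io included */
            event_cancel_event(master, arg);
    }
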
37 files changed:
babeld/babeld.c
bfdd/control.c
bgpd/bgp_fsm.h
bgpd/bgp_io.c
bgpd/bgp_labelpool.c
bgpd/bgpd.c
doc/developer/process-architecture.rst
doc/developer/tracing.rst
eigrpd/eigrp_filter.c
eigrpd/eigrp_interface.c
eigrpd/eigrp_neighbor.c
isisd/isis_circuit.c
isisd/isisd.c
lib/agentx.c
lib/event.c
lib/event.h
lib/frr_zmq.c
lib/libfrr_trace.h
lib/northbound_cli.c
lib/spf_backoff.c
lib/zlog_5424.c
nhrpd/netlink_arp.c
ospf6d/ospf6_interface.c
ospf6d/ospf6_top.c
ospfd/ospf_interface.c
ospfd/ospf_neighbor.c
pathd/path_pcep_controller.c
pathd/path_pcep_pcc.c
pathd/path_ted.c
pathd/pathd.c
pimd/pim_msdp.h
tests/lib/test_timer_correctness.c
tests/lib/test_timer_performance.c
watchfrr/watchfrr.c
zebra/dplane_fpm_nl.c
zebra/zebra_dplane.c
zebra/zserv.c

diff --git a/babeld/babeld.c b/babeld/babeld.c
index 7bcf8b9a7861622a0265b599194247c4b298c505..4ce92c520472603d1013ea470f852b9ba57985a7 100644 (file)
@@ -306,8 +306,8 @@ babel_clean_routing_process(void)
     babel_interface_close_all();
 
     /* cancel events */
-    thread_cancel(&babel_routing_process->t_read);
-    thread_cancel(&babel_routing_process->t_update);
+    event_cancel(&babel_routing_process->t_read);
+    event_cancel(&babel_routing_process->t_update);
 
     distribute_list_delete(&babel_routing_process->distribute_ctx);
     XFREE(MTYPE_BABEL, babel_routing_process);
@@ -485,7 +485,7 @@ static void
 babel_set_timer(struct timeval *timeout)
 {
     long msecs = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
-    thread_cancel(&(babel_routing_process->t_update));
+    event_cancel(&(babel_routing_process->t_update));
     event_add_timer_msec(master, babel_main_loop, NULL, msecs,
                         &babel_routing_process->t_update);
 }
diff --git a/bfdd/control.c b/bfdd/control.c
index 95e0ef3d6022779beeedb54b0c242f809953cc5a..739de23b5ace99b2110207a9ca57a336a306b39b 100644 (file)
@@ -132,7 +132,7 @@ void control_shutdown(void)
 {
        struct bfd_control_socket *bcs;
 
-       thread_cancel(&bglobal.bg_csockev);
+       event_cancel(&bglobal.bg_csockev);
 
        socket_close(&bglobal.bg_csock);
 
@@ -185,8 +185,8 @@ static void control_free(struct bfd_control_socket *bcs)
        struct bfd_control_queue *bcq;
        struct bfd_notify_peer *bnp;
 
-       thread_cancel(&(bcs->bcs_ev));
-       thread_cancel(&(bcs->bcs_outev));
+       event_cancel(&(bcs->bcs_ev));
+       event_cancel(&(bcs->bcs_outev));
 
        close(bcs->bcs_sd);
 
@@ -292,7 +292,7 @@ static int control_queue_dequeue(struct bfd_control_socket *bcs)
        return 1;
 
 empty_list:
-       thread_cancel(&(bcs->bcs_outev));
+       event_cancel(&(bcs->bcs_outev));
        bcs->bcs_bout = NULL;
        return 0;
 }
diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h
index 9af02436cf5e1659bc6b9dcd19b3cad5d7693720..e3cfd0c8938f8131d42c12a2b67bb056b4f95580 100644 (file)
@@ -24,7 +24,7 @@
 #define BGP_EVENT_FLUSH(P)                                                     \
        do {                                                                   \
                assert(peer);                                                  \
-               thread_cancel_event_ready(bm->master, (P));                    \
+               event_cancel_event_ready(bm->master, (P));                     \
        } while (0)
 
 #define BGP_UPDATE_GROUP_TIMER_ON(T, F)                                        \
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index 6af01be315cd1983f50b5ca3d85194e158b1fda6..7781a6c8e8174761706d56962df59fcc62e3d1d0 100644 (file)
@@ -65,7 +65,7 @@ void bgp_writes_off(struct peer *peer)
        struct frr_pthread *fpt = bgp_pth_io;
        assert(fpt->running);
 
-       thread_cancel_async(fpt->master, &peer->t_write, NULL);
+       event_cancel_async(fpt->master, &peer->t_write, NULL);
        THREAD_OFF(peer->t_generate_updgrp_packets);
 
        UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
@@ -96,7 +96,7 @@ void bgp_reads_off(struct peer *peer)
        struct frr_pthread *fpt = bgp_pth_io;
        assert(fpt->running);
 
-       thread_cancel_async(fpt->master, &peer->t_read, NULL);
+       event_cancel_async(fpt->master, &peer->t_read, NULL);
        THREAD_OFF(peer->t_process_packet);
        THREAD_OFF(peer->t_process_packet_error);
 
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index e04fb6eb23892b151e6dc58d2ba65548c146a445..629f0c7dd36f8183488fbe2eba52b7f69acb17a3 100644 (file)
@@ -1202,7 +1202,7 @@ static void lptest_stop(void)
        }
 
        if (tcb->event_thread)
-               thread_cancel(&tcb->event_thread);
+               event_cancel(&tcb->event_thread);
 
        lpt_inprogress = false;
 }
@@ -1491,7 +1491,7 @@ static void lptest_delete(void *val)
        }
 
        if (tcb->event_thread)
-               thread_cancel(&tcb->event_thread);
+               event_cancel(&tcb->event_thread);
 
        memset(tcb, 0, sizeof(*tcb));
 
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index eb91eaf084d7428da10a79bbb18aa0202d0e8066..73d183ac9577704ef9e767c088e2c390bf90e459 100644 (file)
@@ -1126,7 +1126,7 @@ static void peer_free(struct peer *peer)
        bgp_timer_set(peer);
        bgp_reads_off(peer);
        bgp_writes_off(peer);
-       thread_cancel_event_ready(bm->master, peer);
+       event_cancel_event_ready(bm->master, peer);
        FOREACH_AFI_SAFI (afi, safi)
                THREAD_OFF(peer->t_revalidate_all[afi][safi]);
        assert(!peer->t_write);
@@ -2516,7 +2516,7 @@ int peer_delete(struct peer *peer)
        bgp_keepalives_off(peer);
        bgp_reads_off(peer);
        bgp_writes_off(peer);
-       thread_cancel_event_ready(bm->master, peer);
+       event_cancel_event_ready(bm->master, peer);
        FOREACH_AFI_SAFI (afi, safi)
                THREAD_OFF(peer->t_revalidate_all[afi][safi]);
        assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
diff --git a/doc/developer/process-architecture.rst b/doc/developer/process-architecture.rst
index 25decffb07fc21fe257c54bdee82f82081c99de2..84f2581c6c462310709ff4c6ecb86aab99236fd4 100644 (file)
@@ -136,7 +136,7 @@ Mapping the general names used in the figure to specific FRR functions:
 - ``task`` is ``struct event *``
 - ``fetch`` is ``thread_fetch()``
 - ``exec()`` is ``thread_call``
-- ``cancel()`` is ``thread_cancel()``
+- ``cancel()`` is ``event_cancel()``
 - ``schedule()`` is any of the various task-specific ``event_add_*`` functions
 
 Adding tasks is done with various task-specific function-like macros. These
@@ -228,7 +228,7 @@ well as *any other pthread*. This serves as the basis for inter-thread
 communication and boils down to a slightly more complicated method of message
 passing, where the messages are the regular task events as used in the
 event-driven model. The only difference is thread cancellation, which requires
-calling ``thread_cancel_async()`` instead of ``thread_cancel`` to cancel a task
+calling ``event_cancel_async()`` instead of ``event_cancel`` to cancel a task
 currently scheduled on a ``threadmaster`` belonging to a different pthread.
 This is necessary to avoid race conditions in the specific case where one
 pthread wants to guarantee that a task on another pthread is cancelled before
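
The same-pthread/other-pthread rule described in this hunk is what the pathd/path_pcep_controller.c change further down applies. A minimal sketch of that pattern, assuming the caller holds a valid struct event ** (names here are placeholders, not from this commit):

    #include <pthread.h>
    #include "event.h"   /* assumed include path */

    static void cancel_task_safely(struct thread_master *m, struct event **task)
    {
            if (m->owner == pthread_self())
                    /* we own this threadmaster: synchronous cancel */
                    event_cancel(task);
            else
                    /* owned by another pthread: queue the request and block
                     * until that pthread has actually cancelled the task */
                    event_cancel_async(m, task, NULL);
    }
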
diff --git a/doc/developer/tracing.rst b/doc/developer/tracing.rst
index 63b04585f18c2d197a51c325ea9eb0c73a7247a5..76f6004034e70a53b0567304e8eeac438c3bbac0 100644 (file)
@@ -150,8 +150,8 @@ Example::
          frr_libfrr:frr_pthread_stop (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
          frr_libfrr:frr_pthread_run (loglevel: TRACE_DEBUG_LINE (13)) (type: tracepoint)
          frr_libfrr:thread_call (loglevel: TRACE_INFO (6)) (type: tracepoint)
-         frr_libfrr:thread_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
-         frr_libfrr:thread_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
+         frr_libfrr:event_cancel_async (loglevel: TRACE_INFO (6)) (type: tracepoint)
+         frr_libfrr:event_cancel (loglevel: TRACE_INFO (6)) (type: tracepoint)
          frr_libfrr:schedule_write (loglevel: TRACE_INFO (6)) (type: tracepoint)
          frr_libfrr:schedule_read (loglevel: TRACE_INFO (6)) (type: tracepoint)
          frr_libfrr:schedule_event (loglevel: TRACE_INFO (6)) (type: tracepoint)
diff --git a/eigrpd/eigrp_filter.c b/eigrpd/eigrp_filter.c
index f39530877a9eb840ae18a18e23c982a5800a71da..cfa4b22af698f37d00a931547700471c5256cf51 100644 (file)
@@ -111,7 +111,7 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
                // TODO: check Graceful restart after 10sec
 
                /* cancel GR scheduled */
-               thread_cancel(&(e->t_distribute));
+               event_cancel(&(e->t_distribute));
 
                /* schedule Graceful restart for whole process in 10sec */
                event_add_timer(master, eigrp_distribute_timer_process, e, (10),
@@ -186,7 +186,7 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
        // TODO: check Graceful restart after 10sec
 
        /* Cancel GR scheduled */
-       thread_cancel(&(ei->t_distribute));
+       event_cancel(&(ei->t_distribute));
        /* schedule Graceful restart for interface in 10sec */
        event_add_timer(master, eigrp_distribute_timer_interface, ei, 10,
                        &ei->t_distribute);
diff --git a/eigrpd/eigrp_interface.c b/eigrpd/eigrp_interface.c
index cd268fa9b995638b3fa1c4581ea32bc0e25a43b1..297a312a67f49b108ce936adfd9e3dd8aba6e824 100644 (file)
@@ -360,7 +360,7 @@ void eigrp_if_stream_unset(struct eigrp_interface *ei)
        if (ei->on_write_q) {
                listnode_delete(eigrp->oi_write_q, ei);
                if (list_isempty(eigrp->oi_write_q))
-                       thread_cancel(&(eigrp->t_write));
+                       event_cancel(&(eigrp->t_write));
                ei->on_write_q = 0;
        }
 }
@@ -422,7 +422,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
        struct eigrp *eigrp = ei->eigrp;
 
        if (source == INTERFACE_DOWN_BY_VTY) {
-               thread_cancel(&ei->t_hello);
+               event_cancel(&ei->t_hello);
                eigrp_hello_send(ei, EIGRP_HELLO_GRACEFUL_SHUTDOWN, NULL);
        }
 
diff --git a/eigrpd/eigrp_neighbor.c b/eigrpd/eigrp_neighbor.c
index e40c4a50bd033fd1a9e3143df5f15d6a501a0457..05d1b12f13f66bcae4f157e0ea162fe1f86af80a 100644 (file)
@@ -164,7 +164,7 @@ void eigrp_nbr_delete(struct eigrp_neighbor *nbr)
                eigrp_topology_neighbor_down(nbr->ei->eigrp, nbr);
 
        /* Cancel all events. */ /* Thread lookup cost would be negligible. */
-       thread_cancel_event(master, nbr);
+       event_cancel_event(master, nbr);
        eigrp_fifo_free(nbr->multicast_queue);
        eigrp_fifo_free(nbr->retrans_queue);
        THREAD_OFF(nbr->t_holddown);
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index f5c58dddbfba4a69d654024022fd33382057361b..2a33d102166cb283f807a3891e663bb0294f5506 100644 (file)
@@ -929,7 +929,7 @@ void isis_circuit_down(struct isis_circuit *circuit)
                circuit->snd_stream = NULL;
        }
 
-       thread_cancel_event(master, circuit);
+       event_cancel_event(master, circuit);
 
        return;
 }
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 886849fed5c7dc3d4111db347d065404f9efb3b9..86dbc228f9bc1792a1ed377ff1d3e4b62eb00e03 100644 (file)
@@ -548,7 +548,7 @@ void isis_area_destroy(struct isis_area *area)
        THREAD_OFF(area->t_lsp_refresh[1]);
        THREAD_OFF(area->t_rlfa_rib_update);
 
-       thread_cancel_event(master, area);
+       event_cancel_event(master, area);
 
        listnode_delete(area->isis->area_list, area);
 
diff --git a/lib/agentx.c b/lib/agentx.c
index fc724e7d700c41298a252859d75d7ab00c30de56..7f66f9f9c7f92f5e3276c17a6c78da0a928e07f2 100644 (file)
@@ -97,7 +97,7 @@ static void agentx_events_update(void)
        struct event **thr;
        int fd, thr_fd;
 
-       thread_cancel(&timeout_thr);
+       event_cancel(&timeout_thr);
 
        FD_ZERO(&fds);
        snmp_select_info(&maxfd, &fds, &timeout, &block);
@@ -119,7 +119,7 @@ static void agentx_events_update(void)
                if (thr_fd == fd) {
                        struct listnode *nextln = listnextnode(ln);
                        if (!FD_ISSET(fd, &fds)) {
-                               thread_cancel(thr);
+                               event_cancel(thr);
                                XFREE(MTYPE_TMP, thr);
                                list_delete_node(events, ln);
                        }
@@ -142,7 +142,7 @@ static void agentx_events_update(void)
        while (ln) {
                struct listnode *nextln = listnextnode(ln);
                thr = listgetdata(ln);
-               thread_cancel(thr);
+               event_cancel(thr);
                XFREE(MTYPE_TMP, thr);
                list_delete_node(events, ln);
                ln = nextln;
diff --git a/lib/event.c b/lib/event.c
index f081ec4274bfbe1c62af3c220f60a93645fd64be..a3d2ec7a39a063ab72614fae7c62c9ec478118a2 100644 (file)
@@ -38,7 +38,7 @@ struct cancel_req {
 };
 
 /* Flags for task cancellation */
-#define THREAD_CANCEL_FLAG_READY     0x01
+#define EVENT_CANCEL_FLAG_READY 0x01
 
 static int thread_timer_cmp(const struct event *a, const struct event *b)
 {
@@ -1169,8 +1169,8 @@ void _event_add_event(const struct xref_threadsched *xref,
  *   - POLLIN
  *   - POLLOUT
  */
-static void thread_cancel_rw(struct thread_master *master, int fd, short state,
-                            int idx_hint)
+static void event_cancel_rw(struct thread_master *master, int fd, short state,
+                           int idx_hint)
 {
        bool found = false;
 
@@ -1267,7 +1267,7 @@ static void cancel_arg_helper(struct thread_master *master,
        }
 
        /* If requested, stop here and ignore io and timers */
-       if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
+       if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
                return;
 
        /* Check the io tasks */
@@ -1283,7 +1283,7 @@ static void cancel_arg_helper(struct thread_master *master,
                        fd = pfd->fd;
 
                        /* Found a match to cancel: clean up fd arrays */
-                       thread_cancel_rw(master, pfd->fd, pfd->events, i);
+                       event_cancel_rw(master, pfd->fd, pfd->events, i);
 
                        /* Clean up thread arrays */
                        master->read[fd] = NULL;
@@ -1328,7 +1328,7 @@ static void cancel_arg_helper(struct thread_master *master,
  * @param master the thread master to process
  * @REQUIRE master->mtx
  */
-static void do_thread_cancel(struct thread_master *master)
+static void do_event_cancel(struct thread_master *master)
 {
        struct thread_list_head *list = NULL;
        struct event **thread_array = NULL;
@@ -1364,11 +1364,11 @@ static void do_thread_cancel(struct thread_master *master)
                /* Determine the appropriate queue to cancel the thread from */
                switch (thread->type) {
                case THREAD_READ:
-                       thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
+                       event_cancel_rw(master, thread->u.fd, POLLIN, -1);
                        thread_array = master->read;
                        break;
                case THREAD_WRITE:
-                       thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
+                       event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
                        thread_array = master->write;
                        break;
                case THREAD_TIMER:
@@ -1401,7 +1401,7 @@ static void do_thread_cancel(struct thread_master *master)
        if (master->cancel_req)
                list_delete_all_node(master->cancel_req);
 
-       /* Wake up any threads which may be blocked in thread_cancel_async() */
+       /* Wake up any threads which may be blocked in event_cancel_async() */
        master->canceled = true;
        pthread_cond_broadcast(&master->cancel_cond);
 }
@@ -1426,7 +1426,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
        frr_with_mutex (&m->mtx) {
                cr->eventobj = arg;
                listnode_add(m->cancel_req, cr);
-               do_thread_cancel(m);
+               do_event_cancel(m);
        }
 }
 
@@ -1438,7 +1438,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
  * @param m the thread_master to cancel from
  * @param arg the argument passed when creating the event
  */
-void thread_cancel_event(struct thread_master *master, void *arg)
+void event_cancel_event(struct thread_master *master, void *arg)
 {
        cancel_event_helper(master, arg, 0);
 }
@@ -1451,11 +1451,11 @@ void thread_cancel_event(struct thread_master *master, void *arg)
  * @param m the thread_master to cancel from
  * @param arg the argument passed when creating the event
  */
-void thread_cancel_event_ready(struct thread_master *m, void *arg)
+void event_cancel_event_ready(struct thread_master *m, void *arg)
 {
 
        /* Only cancel ready/event tasks */
-       cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
+       cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
 }
 
 /**
@@ -1465,7 +1465,7 @@ void thread_cancel_event_ready(struct thread_master *m, void *arg)
  *
  * @param thread task to cancel
  */
-void thread_cancel(struct event **thread)
+void event_cancel(struct event **thread)
 {
        struct thread_master *master;
 
@@ -1474,10 +1474,10 @@ void thread_cancel(struct event **thread)
 
        master = (*thread)->master;
 
-       frrtrace(9, frr_libfrr, thread_cancel, master,
-                (*thread)->xref->funcname, (*thread)->xref->xref.file,
-                (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
-                (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
+       frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
+                (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
+                (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
+                (*thread)->u.sands.tv_sec);
 
        assert(master->owner == pthread_self());
 
@@ -1486,7 +1486,7 @@ void thread_cancel(struct event **thread)
                        XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
                cr->thread = *thread;
                listnode_add(master->cancel_req, cr);
-               do_thread_cancel(master);
+               do_event_cancel(master);
        }
 
        *thread = NULL;
@@ -1516,19 +1516,19 @@ void thread_cancel(struct event **thread)
  * @param thread pointer to thread to cancel
  * @param eventobj the event
  */
-void thread_cancel_async(struct thread_master *master, struct event **thread,
-                        void *eventobj)
+void event_cancel_async(struct thread_master *master, struct event **thread,
+                       void *eventobj)
 {
        assert(!(thread && eventobj) && (thread || eventobj));
 
        if (thread && *thread)
-               frrtrace(9, frr_libfrr, thread_cancel_async, master,
+               frrtrace(9, frr_libfrr, event_cancel_async, master,
                         (*thread)->xref->funcname, (*thread)->xref->xref.file,
                         (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
                         (*thread)->u.val, (*thread)->arg,
                         (*thread)->u.sands.tv_sec);
        else
-               frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
+               frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
                         0, NULL, 0, 0, eventobj, 0);
 
        assert(master->owner != pthread_self());
@@ -1638,10 +1638,10 @@ static void thread_process_io(struct thread_master *m, unsigned int num)
                ready++;
 
                /*
-                * Unless someone has called thread_cancel from another
+                * Unless someone has called event_cancel from another
                 * pthread, the only thing that could have changed in
                 * m->handler.pfds while we were asleep is the .events
-                * field in a given pollfd. Barring thread_cancel() that
+                * field in a given pollfd. Barring event_cancel() that
                 * value should be a superset of the values we have in our
                 * copy, so there's no need to update it. Similarily,
                 * barring deletion, the fd should still be a valid index
@@ -1758,7 +1758,7 @@ struct event *thread_fetch(struct thread_master *m, struct event *fetch)
                pthread_mutex_lock(&m->mtx);
 
                /* Process any pending cancellation requests */
-               do_thread_cancel(m);
+               do_event_cancel(m);
 
                /*
                 * Attempt to flush ready queue before going into poll().
diff --git a/lib/event.h b/lib/event.h
index 375cd46bbfdbded08b8c1d106e8ed07ae4543a94..e5b6506fd01f9b96ddcb3643bb3c2a04340b89d4 100644 (file)
@@ -158,10 +158,10 @@ struct cpu_thread_history {
 /*
  * Please consider this macro deprecated, and do not use it in new code.
  */
-#define THREAD_OFF(thread)                                             \
-       do {                                                           \
-               if ((thread))                                          \
-                       thread_cancel(&(thread));                      \
+#define THREAD_OFF(thread)                                                     \
+       do {                                                                   \
+               if ((thread))                                                  \
+                       event_cancel(&(thread));                               \
        } while (0)
 
 /*
@@ -241,13 +241,12 @@ extern void _thread_execute(const struct xref_threadsched *xref,
                            struct thread_master *master,
                            void (*fn)(struct event *), void *arg, int val);
 
-extern void thread_cancel(struct event **event);
-extern void thread_cancel_async(struct thread_master *, struct event **,
-                               void *);
+extern void event_cancel(struct event **event);
+extern void event_cancel_async(struct thread_master *, struct event **, void *);
 /* Cancel ready tasks with an arg matching 'arg' */
-extern void thread_cancel_event_ready(struct thread_master *m, void *arg);
+extern void event_cancel_event_ready(struct thread_master *m, void *arg);
 /* Cancel all tasks with an arg matching 'arg', including timers and io */
-extern void thread_cancel_event(struct thread_master *m, void *arg);
+extern void event_cancel_event(struct thread_master *m, void *arg);
 extern struct event *thread_fetch(struct thread_master *, struct event *event);
 extern void thread_call(struct event *event);
 extern unsigned long thread_timer_remain_second(struct event *event);
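
On the THREAD_OFF() comment above ("consider this macro deprecated"): new code can call event_cancel() directly, which also resets the caller's pointer to NULL on return (see the "*thread = NULL;" line in the lib/event.c hunk). A sketch with a hypothetical struct; the NULL guard at the top of event_cancel() sits outside the context shown in this diff, so the unconditional call here is an assumption:

    /* Sketch only; my_ctx and t_reconnect are made-up names. */
    struct my_ctx {
            struct event *t_reconnect;
    };

    static void my_ctx_stop(struct my_ctx *ctx)
    {
            /* deprecated spelling: THREAD_OFF(ctx->t_reconnect); */

            /* preferred in new code; ctx->t_reconnect is NULL afterwards */
            event_cancel(&ctx->t_reconnect);
    }
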
diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c
index 65e1cf5cfb36e201aafcb5cca2881fe7e0b75f79..4a860fe253f589584730adefe5e9a20160e6ce36 100644 (file)
@@ -190,7 +190,7 @@ int _frrzmq_event_add_read(const struct xref_threadsched *xref,
        cb->in_cb = false;
 
        if (events & ZMQ_POLLIN) {
-               thread_cancel(&cb->read.thread);
+               event_cancel(&cb->read.thread);
 
                event_add_event(master, frrzmq_read_msg, cbp, fd,
                                &cb->read.thread);
@@ -296,7 +296,7 @@ int _frrzmq_event_add_write(const struct xref_threadsched *xref,
        cb->in_cb = false;
 
        if (events & ZMQ_POLLOUT) {
-               thread_cancel(&cb->write.thread);
+               event_cancel(&cb->write.thread);
 
                _event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
                                 &cb->write.thread);
@@ -311,7 +311,7 @@ void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core)
        if (!cb || !*cb)
                return;
        core->cancelled = true;
-       thread_cancel(&core->thread);
+       event_cancel(&core->thread);
 
        /* If cancelled from within a callback, don't try to free memory
         * in this path.
@@ -344,7 +344,7 @@ void frrzmq_check_events(struct frrzmq_cb **cbp, struct cb_core *core,
        if ((events & event) && core->thread && !core->cancelled) {
                struct thread_master *tm = core->thread->master;
 
-               thread_cancel(&core->thread);
+               event_cancel(&core->thread);
 
                if (event == ZMQ_POLLIN)
                        event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
diff --git a/lib/libfrr_trace.h b/lib/libfrr_trace.h
index 2a1bb2f6c3dff60ea880b8df4a2f039b4bb6b0b2..ed1dcfb159e765498cfb5d85a05c518aeac9e65b 100644 (file)
@@ -103,8 +103,8 @@ THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_timer)
 THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_event)
 THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_read)
 THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_write)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel_async)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel_async)
 THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_call)
 
 TRACEPOINT_EVENT(
index 704904304c0349541a014c4c36d74dbab51bc50d..edfca75cd27157bda7089f45fdbd218511c0e3e4 100644 (file)
@@ -265,7 +265,7 @@ int nb_cli_rpc(struct vty *vty, const char *xpath, struct list *input,
 
 void nb_cli_confirmed_commit_clean(struct vty *vty)
 {
-       thread_cancel(&vty->t_confirmed_commit_timeout);
+       event_cancel(&vty->t_confirmed_commit_timeout);
        nb_config_free(vty->confirmed_commit_rollback);
        vty->confirmed_commit_rollback = NULL;
 }
@@ -328,7 +328,7 @@ static int nb_cli_commit(struct vty *vty, bool force,
                                "%% Resetting confirmed-commit timeout to %u minute(s)\n\n",
                                confirmed_timeout);
 
-                       thread_cancel(&vty->t_confirmed_commit_timeout);
+                       event_cancel(&vty->t_confirmed_commit_timeout);
                        event_add_timer(master, nb_cli_confirmed_commit_timeout,
                                        vty, confirmed_timeout * 60,
                                        &vty->t_confirmed_commit_timeout);
diff --git a/lib/spf_backoff.c b/lib/spf_backoff.c
index f1fb84e0b5a7fcfad1a8866fdb4111d567525d34..367386e6a4f6537d663dc72ba1f6da1a7135fcfb 100644 (file)
@@ -97,8 +97,8 @@ void spf_backoff_free(struct spf_backoff *backoff)
        if (!backoff)
                return;
 
-       thread_cancel(&backoff->t_holddown);
-       thread_cancel(&backoff->t_timetolearn);
+       event_cancel(&backoff->t_holddown);
+       event_cancel(&backoff->t_timetolearn);
        XFREE(MTYPE_SPF_BACKOFF_NAME, backoff->name);
 
        XFREE(MTYPE_SPF_BACKOFF, backoff);
@@ -150,7 +150,7 @@ long spf_backoff_schedule(struct spf_backoff *backoff)
                break;
        case SPF_BACKOFF_SHORT_WAIT:
        case SPF_BACKOFF_LONG_WAIT:
-               thread_cancel(&backoff->t_holddown);
+               event_cancel(&backoff->t_holddown);
                event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
                                     backoff, backoff->holddown,
                                     &backoff->t_holddown);
diff --git a/lib/zlog_5424.c b/lib/zlog_5424.c
index 7bfc89bb22dc1f5c792adb9633fa9131579a9d13..2a0e6aff859ab2dab2c440992bdecd0414df7cff 100644 (file)
@@ -1053,7 +1053,7 @@ bool zlog_5424_apply_dst(struct zlog_cfg_5424 *zcf)
 {
        int fd = -1;
 
-       thread_cancel(&zcf->t_reconnect);
+       event_cancel(&zcf->t_reconnect);
 
        if (zcf->prio_min != ZLOG_DISABLED)
                fd = zlog_5424_open(zcf, -1);
@@ -1106,7 +1106,7 @@ bool zlog_5424_rotate(struct zlog_cfg_5424 *zcf)
                if (!zcf->active)
                        return true;
 
-               thread_cancel(&zcf->t_reconnect);
+               event_cancel(&zcf->t_reconnect);
 
                /* need to retain the socket type because it also influences
                 * other fields (packets) and we can't atomically swap these
diff --git a/nhrpd/netlink_arp.c b/nhrpd/netlink_arp.c
index 746cac044dbcb8ce19744e63a8d20756e00e35a5..c929f668060467ce8ab796e8dd00813e18ee61bd 100644 (file)
@@ -125,7 +125,7 @@ static void netlink_log_recv(struct event *t)
 void netlink_set_nflog_group(int nlgroup)
 {
        if (netlink_log_fd >= 0) {
-               thread_cancel(&netlink_log_thread);
+               event_cancel(&netlink_log_thread);
                close(netlink_log_fd);
                netlink_log_fd = -1;
        }
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index baf9b7691afe55c28b9f3106933d1e5ad51d3ae7..7828f35d54c8fe97cca5a2aefe9bcbd2d58a07b4 100644 (file)
@@ -731,7 +731,7 @@ void interface_up(struct event *thread)
        if (!oi->type_cfg)
                oi->type = ospf6_default_iftype(oi->interface);
 
-       thread_cancel(&oi->thread_sso);
+       event_cancel(&oi->thread_sso);
 
        if (IS_OSPF6_DEBUG_INTERFACE)
                zlog_debug("Interface Event %s: [InterfaceUp]",
@@ -935,7 +935,7 @@ void interface_down(struct event *thread)
        if (oi->on_write_q) {
                listnode_delete(ospf6->oi_write_q, oi);
                if (list_isempty(ospf6->oi_write_q))
-                       thread_cancel(&ospf6->t_write);
+                       event_cancel(&ospf6->t_write);
                oi->on_write_q = 0;
        }
 
diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c
index 203d344b3a246b5360b3d12d3f1e3974a0c38ce9..db45198acd3040869788ade4c95560599f23dbe9 100644 (file)
@@ -176,7 +176,7 @@ static int ospf6_vrf_disable(struct vrf *vrf)
                 * from VRF and make it "down".
                 */
                ospf6_vrf_unlink(ospf6, vrf);
-               thread_cancel(&ospf6->t_ospf6_receive);
+               event_cancel(&ospf6->t_ospf6_receive);
                close(ospf6->fd);
                ospf6->fd = -1;
        }
diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c
index 6f0faabc7361e1f932ffe335235500d6229606a3..170aca3d5a46f1b5612ebb21c997fa063c594887 100644 (file)
@@ -356,7 +356,7 @@ void ospf_if_free(struct ospf_interface *oi)
        listnode_delete(oi->ospf->oiflist, oi);
        listnode_delete(oi->area->oiflist, oi);
 
-       thread_cancel_event(master, oi);
+       event_cancel_event(master, oi);
 
        memset(oi, 0, sizeof(*oi));
        XFREE(MTYPE_OSPF_IF, oi);
diff --git a/ospfd/ospf_neighbor.c b/ospfd/ospf_neighbor.c
index d341b2dba187b1cd5222e1bd1f94bca0528422ef..2bbf276dbe6deaea5eac2c9e5b4332b63984d089 100644 (file)
@@ -131,7 +131,7 @@ void ospf_nbr_free(struct ospf_neighbor *nbr)
        THREAD_OFF(nbr->t_ls_upd);
 
        /* Cancel all events. */ /* Thread lookup cost would be negligible. */
-       thread_cancel_event(master, nbr);
+       event_cancel_event(master, nbr);
 
        bfd_sess_free(&nbr->bfd_session);
 
diff --git a/pathd/path_pcep_controller.c b/pathd/path_pcep_controller.c
index 7df3143129d46e06e35a6391e5f20478ee68a8bd..53963c50bc7b6a4da46c11a5acf15c6a3b27c3f2 100644 (file)
@@ -398,9 +398,9 @@ void pcep_thread_cancel_timer(struct event **thread)
        }
 
        if ((*thread)->master->owner == pthread_self()) {
-               thread_cancel(thread);
+               event_cancel(thread);
        } else {
-               thread_cancel_async((*thread)->master, thread, NULL);
+               event_cancel_async((*thread)->master, thread, NULL);
        }
 }
 
diff --git a/pathd/path_pcep_pcc.c b/pathd/path_pcep_pcc.c
index 0ff0b4403a226cefa9c74223b6612eb4b48048bf..f18eff28888ac39527a84b6f604f15a0af4088cb 100644 (file)
@@ -190,17 +190,17 @@ void pcep_pcc_finalize(struct ctrl_state *ctrl_state,
        }
 
        if (pcc_state->t_reconnect != NULL) {
-               thread_cancel(&pcc_state->t_reconnect);
+               event_cancel(&pcc_state->t_reconnect);
                pcc_state->t_reconnect = NULL;
        }
 
        if (pcc_state->t_update_best != NULL) {
-               thread_cancel(&pcc_state->t_update_best);
+               event_cancel(&pcc_state->t_update_best);
                pcc_state->t_update_best = NULL;
        }
 
        if (pcc_state->t_session_timeout != NULL) {
-               thread_cancel(&pcc_state->t_session_timeout);
+               event_cancel(&pcc_state->t_session_timeout);
                pcc_state->t_session_timeout = NULL;
        }
 
@@ -340,7 +340,7 @@ int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
        assert(pcc_state->sess == NULL);
 
        if (pcc_state->t_reconnect != NULL) {
-               thread_cancel(&pcc_state->t_reconnect);
+               event_cancel(&pcc_state->t_reconnect);
                pcc_state->t_reconnect = NULL;
        }
 
@@ -408,7 +408,7 @@ int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
 
        // In case some best pce alternative were waiting to activate
        if (pcc_state->t_update_best != NULL) {
-               thread_cancel(&pcc_state->t_update_best);
+               event_cancel(&pcc_state->t_update_best);
                pcc_state->t_update_best = NULL;
        }
 
diff --git a/pathd/path_ted.c b/pathd/path_ted.c
index 0a2a74cb4298e5976d44ade8e3d2484bac33c751..91033360cce87c999a78ab508a0224ead04317b0 100644 (file)
@@ -657,7 +657,7 @@ void path_ted_timer_handler_refresh(struct event *thread)
 void path_ted_timer_sync_cancel(void)
 {
        if (ted_state_g.t_link_state_sync != NULL) {
-               thread_cancel(&ted_state_g.t_link_state_sync);
+               event_cancel(&ted_state_g.t_link_state_sync);
                ted_state_g.t_link_state_sync = NULL;
        }
 }
@@ -672,7 +672,7 @@ void path_ted_timer_sync_cancel(void)
 void path_ted_timer_refresh_cancel(void)
 {
        if (ted_state_g.t_segment_list_refresh != NULL) {
-               thread_cancel(&ted_state_g.t_segment_list_refresh);
+               event_cancel(&ted_state_g.t_segment_list_refresh);
                ted_state_g.t_segment_list_refresh = NULL;
        }
 }
diff --git a/pathd/pathd.c b/pathd/pathd.c
index 0b7792a5cc6b433fb75e510ff535b9335de3ecd9..d1c0c82d9d8034b916173a00f86eae5e91adeed9 100644 (file)
@@ -1329,7 +1329,7 @@ void trigger_pathd_candidate_removed(struct srte_candidate *candidate)
        /* The hook needs to be call synchronously, otherwise the candidate
        path will be already deleted when the handler is called */
        if (candidate->hook_timer != NULL) {
-               thread_cancel(&candidate->hook_timer);
+               event_cancel(&candidate->hook_timer);
                candidate->hook_timer = NULL;
        }
        hook_call(pathd_candidate_removed, candidate);
diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h
index cae7dd17b6109f130dc16d04c175da065c4a4a11..753279a700b6c4c9399d4497eab74f0ae20d3e26 100644 (file)
@@ -211,8 +211,8 @@ struct pim_msdp {
        event_add_write(mp->pim->msdp.master, pim_msdp_write, mp, mp->fd,      \
                        &mp->t_write)
 
-#define PIM_MSDP_PEER_READ_OFF(mp) thread_cancel(&mp->t_read)
-#define PIM_MSDP_PEER_WRITE_OFF(mp) thread_cancel(&mp->t_write)
+#define PIM_MSDP_PEER_READ_OFF(mp) event_cancel(&mp->t_read)
+#define PIM_MSDP_PEER_WRITE_OFF(mp) event_cancel(&mp->t_write)
 
 #if PIM_IPV != 6
 // struct pim_msdp *msdp;
diff --git a/tests/lib/test_timer_correctness.c b/tests/lib/test_timer_correctness.c
index b54dbe775d8042380362aa3ab4579d0ca16de4a8..54c28fc5b49b8266847d43f543202bdcd09783bb 100644 (file)
@@ -137,7 +137,7 @@ int main(int argc, char **argv)
                        continue;
 
                XFREE(MTYPE_TMP, timers[index]->arg);
-               thread_cancel(&timers[index]);
+               event_cancel(&timers[index]);
                timers_pending--;
        }
 
diff --git a/tests/lib/test_timer_performance.c b/tests/lib/test_timer_performance.c
index ba2aacc8bd4e53a244ed100be8cd72d7c02f79fd..c5ac1fa834025d147ea6a83c35eaf0e5badac45c 100644 (file)
@@ -44,7 +44,7 @@ int main(int argc, char **argv)
                event_add_timer_msec(master, dummy_func, NULL, 0, &timers[i]);
        }
        for (i = 0; i < SCHEDULE_TIMERS; i++)
-               thread_cancel(&timers[i]);
+               event_cancel(&timers[i]);
 
        monotime(&tv_start);
 
@@ -62,7 +62,7 @@ int main(int argc, char **argv)
                int index;
 
                index = prng_rand(prng) % SCHEDULE_TIMERS;
-               thread_cancel(&timers[index]);
+               event_cancel(&timers[index]);
        }
 
        monotime(&tv_stop);
diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c
index b3852f86bc826886057418f43e5cd71d45c8d1c4..8f17f7a9a2dc7a1deaa5624cbaba8068a10d438b 100644 (file)
@@ -415,7 +415,7 @@ static void sigchild(void)
                what = restart->what;
                restart->pid = 0;
                gs.numpids--;
-               thread_cancel(&restart->t_kill);
+               event_cancel(&restart->t_kill);
 
                /* Update restart time to reflect the time the command
                 * completed. */
@@ -688,7 +688,7 @@ static void handle_read(struct event *t_read)
                           dmn->name, (long)delay.tv_sec, (long)delay.tv_usec);
 
        SET_READ_HANDLER(dmn);
-       thread_cancel(&dmn->t_wakeup);
+       event_cancel(&dmn->t_wakeup);
        SET_WAKEUP_ECHO(dmn);
 }
 
@@ -874,7 +874,7 @@ static void phase_hanging(struct event *t_hanging)
 static void set_phase(enum restart_phase new_phase)
 {
        gs.phase = new_phase;
-       thread_cancel(&gs.t_phase_hanging);
+       event_cancel(&gs.t_phase_hanging);
 
        event_add_timer(master, phase_hanging, NULL, PHASE_TIMEOUT,
                        &gs.t_phase_hanging);
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 1cfcac858912b4028e146773205f5ff0b1b9c8c8..b5397ce29192485ed5f2f2261fd459bc4842a989 100644 (file)
@@ -414,14 +414,14 @@ static void fpm_connect(struct event *t);
 static void fpm_reconnect(struct fpm_nl_ctx *fnc)
 {
        /* Cancel all zebra threads first. */
-       thread_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
-       thread_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_lspreset, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_nhgreset, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_nhgwalk, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_ribreset, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_ribwalk, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_rmacreset, NULL);
+       event_cancel_async(zrouter.master, &fnc->t_rmacwalk, NULL);
 
        /*
         * Grab the lock to empty the streams (data plane might try to
@@ -1484,9 +1484,9 @@ static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc)
        THREAD_OFF(fnc->t_rmacwalk);
        THREAD_OFF(fnc->t_event);
        THREAD_OFF(fnc->t_nhg);
-       thread_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
-       thread_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
-       thread_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
+       event_cancel_async(fnc->fthread->master, &fnc->t_read, NULL);
+       event_cancel_async(fnc->fthread->master, &fnc->t_write, NULL);
+       event_cancel_async(fnc->fthread->master, &fnc->t_connect, NULL);
 
        if (fnc->socket != -1) {
                close(fnc->socket);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index adfabd0b091c3787582aa8376ee361f557b367be..333211538c15a84d0cadd3e5eb5fc4e24e951d50 100644 (file)
@@ -5874,11 +5874,11 @@ void zebra_dplane_ns_enable(struct zebra_ns *zns, bool enabled)
 
                /* Stop any outstanding tasks */
                if (zdplane_info.dg_master) {
-                       thread_cancel_async(zdplane_info.dg_master,
-                                           &zi->t_request, NULL);
+                       event_cancel_async(zdplane_info.dg_master,
+                                          &zi->t_request, NULL);
 
-                       thread_cancel_async(zdplane_info.dg_master, &zi->t_read,
-                                           NULL);
+                       event_cancel_async(zdplane_info.dg_master, &zi->t_read,
+                                          NULL);
                }
 
                XFREE(MTYPE_DP_NS, zi);
@@ -6854,8 +6854,8 @@ void zebra_dplane_shutdown(void)
        zdplane_info.dg_run = false;
 
        if (zdplane_info.dg_t_update)
-               thread_cancel_async(zdplane_info.dg_t_update->master,
-                                   &zdplane_info.dg_t_update, NULL);
+               event_cancel_async(zdplane_info.dg_t_update->master,
+                                  &zdplane_info.dg_t_update, NULL);
 
        frr_pthread_stop(zdplane_info.dg_pthread, NULL);
 
diff --git a/zebra/zserv.c b/zebra/zserv.c
index c3ef1557b741d3f697d87a6c8bbd4b994754b977..f9eb7a8e3f862805159a3ff74a14ad8cca251b66 100644 (file)
@@ -670,7 +670,7 @@ void zserv_close_client(struct zserv *client)
                        zlog_debug("Closing client '%s'",
                                   zebra_route_string(client->proto));
 
-               thread_cancel_event(zrouter.master, client);
+               event_cancel_event(zrouter.master, client);
                THREAD_OFF(client->t_cleanup);
                THREAD_OFF(client->t_process);