// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 */
#include <zebra.h>

#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"
DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue");
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item");
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string");
/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;
	assert(wq);

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}
static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}
static void work_queue_item_remove(struct work_queue *wq,
				   struct work_queue_item *item)
{
	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	work_queue_item_dequeue(wq, item);

	work_queue_item_free(item);
}
/* create new work queue */
struct work_queue *work_queue_new(struct event_loop *m, const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	STAILQ_INIT(&new->items);

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = EVENT_YIELD_TIME_SLOT;
	new->spec.retry = WORK_QUEUE_DEFAULT_RETRY;

	return new;
}
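
/* A typical setup, as a sketch: create the queue, then fill in the spec
 * before adding items.  The callback names and the 'struct peer' payload
 * are illustrative only, not part of this API.
 *
 *	static wq_item_status peer_process(struct work_queue *wq, void *data)
 *	{
 *		struct peer *peer = data;	// hypothetical payload type
 *
 *		return process_one(peer) ? WQ_SUCCESS : WQ_RETRY_LATER;
 *	}
 *
 *	wq = work_queue_new(master, "peer processing queue");
 *	wq->spec.workfunc = &peer_process;
 *	wq->spec.del_item_data = &peer_del_item_data;	// hypothetical
 *	wq->spec.max_retries = 3;
 *	wq->spec.hold = 50;	// override the default hold time (ms)
 */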
void work_queue_free_and_null(struct work_queue **wqp)
{
	struct work_queue *wq = *wqp;

	EVENT_OFF(wq->thread);

	while (!work_queue_empty(wq)) {
		struct work_queue_item *item = work_queue_last_item(wq);

		work_queue_item_remove(wq, item);
	}

	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);

	*wqp = NULL;
}
bool work_queue_is_scheduled(struct work_queue *wq)
{
	return event_is_scheduled(wq->thread);
}
static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) &&
	    !event_is_scheduled(wq->thread) && !work_queue_empty(wq)) {
		/* Schedule timer if there's a delay, otherwise just schedule
		 * as an 'event'
		 */
		if (delay > 0) {
			event_add_timer_msec(wq->master, work_queue_run, wq,
					     delay, &wq->thread);
			event_ignore_late_timer(wq->thread);
		} else
			event_add_event(wq->master, work_queue_run, wq, 0,
					&wq->thread);

		/* set thread yield time, if needed */
		if (event_is_scheduled(wq->thread) &&
		    wq->spec.yield != EVENT_YIELD_TIME_SLOT)
			event_set_yield_time(wq->thread, wq->spec.yield);

		return 1;
	} else
		return 0;
}
void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	item = work_queue_item_new(wq);

	item->data = data;
	work_queue_item_enqueue(wq, item);

	work_queue_schedule(wq, wq->spec.hold);
}
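
/* Callers hand the queue opaque payloads; continuing the sketch above:
 *
 *	work_queue_add(wq, peer);
 *
 * The run is scheduled spec.hold milliseconds out, and no new event is
 * scheduled while one is already pending, so a burst of adds coalesces
 * into a single queue run.
 */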
static void work_queue_item_requeue(struct work_queue *wq,
				    struct work_queue_item *item)
{
	work_queue_item_dequeue(wq, item);

	/* attach to end of list */
	work_queue_item_enqueue(wq, item);
}
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			work_queue_item_count(wq), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}
void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}
/* 'plug' a queue: stop it from being scheduled,
 * i.e., prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	EVENT_OFF(wq->thread);

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, schedule it again, if appropriate,
 * i.e., allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
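
/* Plugging is useful for batching: suppress queue runs while loading a
 * large set of items, then release the queue in one go.  A sketch:
 *
 *	work_queue_plug(wq);
 *	for (i = 0; i < nitems; i++)
 *		work_queue_add(wq, items[i]);
 *	work_queue_unplug(wq);	// reschedules; draining resumes
 */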
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add() will reschedule it when new work arrives
 */
void work_queue_run(struct event *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item, *titem;
	wq_item_status ret = WQ_SUCCESS;
	unsigned int cycles = 0;
	char yielded = 0;

	wq = EVENT_ARG(thread);
	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run
	 * to provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want
	 * to be able to adjust quickly to CPU pressure. Average won't shift
	 * much if the daemon has been running a long time.
	 */
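	/* Illustration (numbers hypothetical): with granularity 8, the loop
	 * below consults event_should_yield() (a clock check) once per eight
	 * items rather than once per item; the stats block at the end of
	 * this function then grows or shrinks that batch size based on how
	 * many cycles a run completed before yielding.
	 */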
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
	STAILQ_FOREACH_SAFE (item, &wq->items, wq, titem) {
		assert(item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			work_queue_item_remove(wq, item);
			continue;
		}
		/* run and take care of items that want to be retried
		 * immediately */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));
		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, because this isn't an
			 * item-specific error; fall through to WQ_RETRY_LATER
			 */
			item->ran--;
		}
		/* fallthru */
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, item);
			/* If a single node is being used with a meta-queue
			 * (e.g., zebra), update the next node as we don't
			 * want to exit the thread and reschedule it after
			 * every node. By definition, WQ_REQUEUE is meant to
			 * continue the processing; the yield logic will kick
			 * in to terminate the thread when time has exceeded.
			 */
			if (titem == NULL)
				titem = item;
			break;
		}
		case WQ_RETRY_NOW:
			/* a RETRY_NOW that gets here has exceeded
			 * max_retries, same as an error */
			/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, item);
			break;
		}
		}
		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity) &&
		    event_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:
#define WQ_HYSTERESIS_FACTOR 4

	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with yielded check, provides hysteresis for
		 * granularity changes */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
#undef WQ_HYSTERESIS_FACTOR
	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;
	/* Is the queue done yet? If it is, call the completion callback. */
	if (!work_queue_empty(wq)) {
		if (ret == WQ_RETRY_LATER || ret == WQ_QUEUE_BLOCKED)
			work_queue_schedule(wq, wq->spec.retry);
		else
			work_queue_schedule(wq, 0);
	} else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);
}
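
/* Return-status semantics above, summarized as a sketch workfunc (the
 * function and helper names are hypothetical):
 *
 *	static wq_item_status fib_process(struct work_queue *wq, void *data)
 *	{
 *		switch (try_install(data)) {
 *		case DONE:
 *			return WQ_SUCCESS;	// item removed from queue
 *		case KERNEL_BUSY:
 *			return WQ_RETRY_LATER;	// run ends; queue retried
 *						// after spec.retry ms
 *		case TRANSIENT_FAILURE:
 *			return WQ_RETRY_NOW;	// retried immediately, up to
 *						// spec.max_retries times
 *		default:
 *			return WQ_REQUEUE;	// moved to tail; run continues
 *		}
 *	}
 *
 * WQ_QUEUE_BLOCKED behaves like WQ_RETRY_LATER but does not charge the
 * item a retry, since the queue (not the item) is at fault.
 */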