// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 */
#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"
DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue");
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item");
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string");
/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;
#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;

	assert(wq);

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM,
		       sizeof(struct work_queue_item));

	return item;
}
static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}
static void work_queue_item_remove(struct work_queue *wq,
				   struct work_queue_item *item)
{
	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	work_queue_item_dequeue(wq, item);

	work_queue_item_free(item);
}
/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	STAILQ_INIT(&new->items);

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;
	new->spec.retry = WORK_QUEUE_DEFAULT_RETRY;

	return new;
}
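/*
 * Illustrative caller-side sketch, not part of this file: the callbacks
 * "my_workfunc" and "my_del_item_data" are hypothetical names invented
 * for the example; the wq->spec fields and work_queue_* calls are real.
 *
 *	static wq_item_status my_workfunc(struct work_queue *wq, void *data)
 *	{
 *		// process one queued item's data here
 *		return WQ_SUCCESS;
 *	}
 *
 *	wq = work_queue_new(master, "example queue");
 *	wq->spec.workfunc = my_workfunc;
 *	wq->spec.del_item_data = my_del_item_data;
 *	wq->spec.max_retries = 3;
 *	work_queue_add(wq, some_item_data);
 */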
void work_queue_free_and_null(struct work_queue **wqp)
{
	struct work_queue *wq = *wqp;

	THREAD_OFF(wq->thread);

	while (!work_queue_empty(wq)) {
		struct work_queue_item *item = work_queue_last_item(wq);

		work_queue_item_remove(wq, item);
	}

	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);

	*wqp = NULL;
}
bool work_queue_is_scheduled(struct work_queue *wq)
{
	return thread_is_scheduled(wq->thread);
}
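/* Schedule the queue's processing thread, but only if the queue is
 * unplugged, non-empty, and not already scheduled. Returns 1 if a thread
 * was scheduled, 0 otherwise.
 */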
static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) &&
	    !thread_is_scheduled(wq->thread) && !work_queue_empty(wq)) {
		/* Schedule timer if there's a delay, otherwise just schedule
		 * as an 'event'
		 */
		if (delay > 0) {
			thread_add_timer_msec(wq->master, work_queue_run, wq,
					      delay, &wq->thread);
			thread_ignore_late_timer(wq->thread);
		} else
			thread_add_event(wq->master, work_queue_run, wq, 0,
					 &wq->thread);

		/* set thread yield time, if needed */
		if (thread_is_scheduled(wq->thread) &&
		    wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);

		return 1;
	}

	return 0;
}
void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	item = work_queue_item_new(wq);

	item->data = data;
	work_queue_item_enqueue(wq, item);

	work_queue_schedule(wq, wq->spec.hold);
}
static void work_queue_item_requeue(struct work_queue *wq,
				    struct work_queue_item *item)
{
	work_queue_item_dequeue(wq, item);

	/* attach to end of list */
	work_queue_item_enqueue(wq, item);
}
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			work_queue_item_count(wq), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}
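/* Columns printed by the DEFUN above, in order: plugged flag ('P' when the
 * queue is plugged), queued item count, hold time (ms), total runs, total
 * yields, then the best, granularity, total, and average cycle counts, and
 * finally the queue name.
 */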
void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}
/* 'plug' a queue: Stop it from being scheduled,
 * ie: prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	THREAD_OFF(wq->thread);

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, schedule it again, if appropriate
 * Ie: Allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
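/*
 * Illustrative plug/unplug batching sketch; the loop below is hypothetical
 * caller pseudocode, only the work_queue_* calls are real:
 *
 *	work_queue_plug(wq);		// hold the queue while enqueueing
 *	for (each pending change)
 *		work_queue_add(wq, change_data);
 *	work_queue_unplug(wq);		// allow the queue to drain again
 */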
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise a later work_queue_add() will schedule a new one
 */
void work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item, *titem;
	wq_item_status ret = WQ_SUCCESS;
	unsigned int cycles = 0;
	char yielded = 0;

	wq = THREAD_ARG(thread);

	assert(wq);
	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run
	 * to provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want
	 * to be able to adjust quickly to CPU pressure. The average won't
	 * shift much if the daemon has been running a long time.
	 */
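	/* Example: with granularity 40, thread_should_yield() is consulted
	 * only on every 40th cycle, keeping per-item overhead low while
	 * still bounding how far the thread can overrun its time slot.
	 */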
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
	STAILQ_FOREACH_SAFE (item, &wq->items, wq, titem) {
		assert(item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			work_queue_item_remove(wq, item);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately
		 */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, cause this isn't an item
			 * specific error, and fall through to WQ_RETRY_LATER
			 */
			item->ran--;
		}
		/* fallthru */
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, item);
			/* If a single node is being used with a meta-queue,
			 * update the next node as we don't want to exit the
			 * thread and reschedule it after every node. By
			 * definition, WQ_REQUEUE is meant to continue the
			 * processing; the yield logic will kick in to
			 * terminate the thread when time has exceeded.
			 */
			if (titem == NULL)
				titem = item;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_tries, same as
		 * ERROR
		 */
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, item);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:
#define WQ_HYSTERESIS_FACTOR 4
	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with yielded check, provides hysteresis for
		 * granularity ramp up
		 */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
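	/* Worked example of the ramp logic above (illustrative numbers):
	 * with granularity 10 and WQ_HYSTERESIS_FACTOR 4, a run of 45 cycles
	 * (> 10 * 4) adds 4, giving 14; a run of 170 cycles (> 10 * 4 * 4)
	 * multiplies instead, giving 40.
	 */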
#undef WQ_HYSTERESIS_FACTOR
	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;
	/* Is the queue done yet? If it is, call the completion callback. */
	if (!work_queue_empty(wq)) {
		if (ret == WQ_RETRY_LATER ||
		    ret == WQ_QUEUE_BLOCKED)
			work_queue_schedule(wq, wq->spec.retry);
		else
			work_queue_schedule(wq, 0);
	} else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);
}