/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"
DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")
/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}
static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}
static void work_queue_item_remove(struct work_queue *wq,
				   struct work_queue_item *item)
{
	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	work_queue_item_dequeue(wq, item);

	work_queue_item_free(item);
}
/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	STAILQ_INIT(&new->items);

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;
	new->spec.retry = WORK_QUEUE_DEFAULT_RETRY;

	return new;
}
void work_queue_free_original(struct work_queue *wq)
{
	if (wq->thread != NULL)
		thread_cancel(wq->thread);

	while (!work_queue_empty(wq)) {
		struct work_queue_item *item = work_queue_last_item(wq);

		work_queue_item_remove(wq, item);
	}

	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);
}
void work_queue_free_and_null(struct work_queue **wq)
{
	work_queue_free_original(*wq);
	*wq = NULL;
}
bool work_queue_is_scheduled(struct work_queue *wq)
{
	return (wq->thread != NULL);
}
static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) && (wq->thread == NULL)
	    && !work_queue_empty(wq)) {
		/* Schedule timer if there's a delay, otherwise just schedule
		 * as an event */
		if (delay > 0)
			thread_add_timer_msec(wq->master, work_queue_run, wq,
					      delay, &wq->thread);
		else
			thread_add_event(wq->master, work_queue_run, wq, 0,
					 &wq->thread);

		/* set thread yield time, if needed */
		if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);
		return 1;
	} else
		return 0;
}
void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	item = work_queue_item_new(wq);

	item->data = data;
	work_queue_item_enqueue(wq, item);

	work_queue_schedule(wq, wq->spec.hold);
}
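
/* A minimal usage sketch for the API above, from a hypothetical caller
 * (process_one, free_one, ctx and master are illustrative stand-ins, not
 * symbols defined by this library):
 *
 *	static wq_item_status process_one(struct work_queue *wq, void *data)
 *	{
 *		// handle one unit of work for this item
 *		return WQ_SUCCESS;	// or WQ_RETRY_NOW/LATER, WQ_REQUEUE...
 *	}
 *
 *	static void free_one(struct work_queue *wq, void *data)
 *	{
 *		XFREE(MTYPE_TMP, data);
 *	}
 *
 *	struct work_queue *q = work_queue_new(master, "example queue");
 *	q->spec.workfunc = process_one;
 *	q->spec.del_item_data = free_one;
 *	q->spec.max_retries = 3;
 *	work_queue_add(q, ctx);	// queue runs once spec.hold ms have passed
 */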
static void work_queue_item_requeue(struct work_queue *wq,
				    struct work_queue_item *item)
{
	work_queue_item_dequeue(wq, item);

	/* attach to end of list */
	work_queue_item_enqueue(wq, item);
}
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			work_queue_item_count(wq), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}
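
/* Illustrative output sketch for the command above; the column layout
 * follows the format strings, but the data row values here are made up.
 * A leading 'P' marks a plugged queue.
 *
 *	     List (ms)   Q. Runs   Yields        Cycle Counts
 *	P   Items  Hold    Total    Total    Best  Gran.    Total   Avg. Name
 *	      231    10     1380        2      48      8    11040      8 example queue
 */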
void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}
/* 'plug' a queue: Stop it from being scheduled,
 * ie: prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	if (wq->thread)
		thread_cancel(wq->thread);

	wq->thread = NULL;

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, schedule it again, if appropriate
 * Ie: Allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
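
/* A hypothetical plug/unplug usage sketch: hold the queue back while a
 * batch of related items is enqueued, then let a single run drain them.
 * 'q', 'batch', 'i' and 'n' are illustrative, not symbols from this file.
 *
 *	work_queue_plug(q);		// cancel any scheduled run
 *	for (i = 0; i < n; i++)
 *		work_queue_add(q, batch[i]);
 *	work_queue_unplug(q);		// reschedule after q->spec.hold ms
 */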
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add will schedule it again when items are added
 */
int work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item, *titem;
	wq_item_status ret = WQ_SUCCESS;
	unsigned int cycles = 0;
	char yielded = 0;

	wq = THREAD_ARG(thread);
	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run to
	 * provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want to
	 * be able to adjust quickly to CPU pressure. Average won't shift much
	 * if the daemon has been running a long time.
	 */
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
	STAILQ_FOREACH_SAFE (item, &wq->items, wq, titem) {
		assert(item && item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			/* run error handler, if any */
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
			work_queue_item_remove(wq, item);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, because this isn't an
			 * item-specific error, and fall through to
			 * WQ_RETRY_LATER
			 */
			item->ran--;
		}
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, item);
			/* If a single node is being used with a meta-queue
			 * (e.g., zebra), update the next node as we don't want
			 * to exit the thread and reschedule it after every
			 * node. By definition, WQ_REQUEUE is meant to continue
			 * the processing; the yield logic will kick in to
			 * terminate the thread when time has exceeded.
			 */
			if (titem == NULL)
				titem = item;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_tries, same as
		 * ERROR */
		case WQ_ERROR: {
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
		}
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, item);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:
#define WQ_HYSTERESIS_FACTOR 4

	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with the yielded check, provides hysteresis for
		 * granularity
		 */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
#undef WQ_HYSTERESIS_FACTOR
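
	/* Worked example of the adjustment above, with granularity currently
	 * 4 and WQ_HYSTERESIS_FACTOR 4 (the cycle counts are illustrative):
	 * yielding after only 2 cycles reduces granularity to 2, so the next
	 * run checks for yielding sooner; finishing 20 cycles (> 4 * 4)
	 * nudges granularity up to 8 (4 + 4); finishing 100 cycles
	 * (> 4 * 4 * 4) quadruples it to 16.
	 */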
	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;
#if 0
	printf("%s: cycles %d, new: best %d, worst %d\n",
	       __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif
	/* Is the queue done yet? If it is, call the completion callback. */
	if (!work_queue_empty(wq)) {
		if (ret == WQ_RETRY_LATER || ret == WQ_QUEUE_BLOCKED)
			work_queue_schedule(wq, wq->spec.retry);
		else
			work_queue_schedule(wq, 0);
	} else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);

	return 0;
}