/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}

static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}

/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	if (new == NULL)
		return new;

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	if ((new->items = list_new()) == NULL) {
		XFREE(MTYPE_WORK_QUEUE_NAME, new->name);
		XFREE(MTYPE_WORK_QUEUE, new);

		return NULL;
	}

	new->items->del = (void (*)(void *))work_queue_item_free;

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;

	return new;
}
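
/* Example usage (illustrative sketch; the callback names here are
 * hypothetical, not part of this library):
 *
 *   struct work_queue *wq = work_queue_new(master, "example queue");
 *   wq->spec.workfunc = example_process_item;    (required)
 *   wq->spec.del_item_data = example_free_item;  (optional)
 *   wq->spec.max_retries = 3;
 *   work_queue_add(wq, item_data);
 *
 * The hold default set above delays the first queue run, letting several
 * successive work_queue_add() calls batch into a single run.
 */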

void work_queue_free(struct work_queue *wq)
{
	if (wq->thread != NULL)
		thread_cancel(wq->thread);

	/* list_delete frees items via callback */
	list_delete(wq->items);
	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);
}

bool work_queue_is_scheduled(struct work_queue *wq)
{
	return (wq->thread != NULL);
}

static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) && (wq->thread == NULL)
	    && (listcount(wq->items) > 0)) {
		thread_add_timer_msec(wq->master, work_queue_run, wq, delay,
				      &wq->thread);
		/* set thread yield time, if needed */
		if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);
		return 1;
	} else
		return 0;
}

void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	if (!(item = work_queue_item_new(wq))) {
		zlog_err("%s: unable to get new queue item", __func__);
		return;
	}

	item->data = data;
	listnode_add(wq->items, item);

	work_queue_schedule(wq, wq->spec.hold);
}

static void work_queue_item_remove(struct work_queue *wq, struct listnode *ln)
{
	struct work_queue_item *item = listgetdata(ln);

	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	list_delete_node(wq->items, ln);
	work_queue_item_free(item);
}

static void work_queue_item_requeue(struct work_queue *wq, struct listnode *ln)
{
	LISTNODE_DETACH(wq->items, ln);
	LISTNODE_ATTACH(wq->items, ln); /* attach to end of list */
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			listcount(wq->items), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total
						    / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}

void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: Stop it from being scheduled,
 * ie: prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	if (wq->thread)
		thread_cancel(wq->thread);

	wq->thread = NULL;

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate
 * Ie: Allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
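
/* Example usage (illustrative sketch): plug/unplug lets a caller batch
 * updates, holding the queue while many items are enqueued and letting it
 * drain in one go afterwards:
 *
 *   work_queue_plug(wq);              suspend processing
 *   for (i = 0; i < n; i++)
 *       work_queue_add(wq, data[i]);
 *   work_queue_unplug(wq);            reschedule and drain
 */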

/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add() will get it scheduled when new items arrive
 */
int work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item;
	wq_item_status ret;
	unsigned int cycles = 0;
	struct listnode *node, *nnode;
	char yielded = 0;

	wq = THREAD_ARG(thread);

	assert(wq && wq->items);

	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run to
	 * provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want to
	 * be able to adjust quickly to CPU pressure. Average won't shift much
	 * if the daemon has been running a long time.
	 */
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	for (ALL_LIST_ELEMENTS(wq->items, node, nnode, item)) {
		assert(item && item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			/* run error handler, if any */
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item->data);
			work_queue_item_remove(wq, node);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, cause this isn't an item
			 * specific error, and fall through to WQ_RETRY_LATER
			 */
			item->ran--;
		}
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, node);
			/* If a single node is being used with a meta-queue,
			 * update the next node as we don't want to exit the
			 * thread and reschedule it after every node. By
			 * definition, WQ_REQUEUE is meant to continue the
			 * processing; the yield logic will kick in to
			 * terminate the thread when the allotted time has
			 * been exceeded.
			 */
			if (nnode == NULL)
				nnode = node;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_tries, same as
		 * ERROR */
		case WQ_ERROR: {
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item->data);
		}
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, node);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:

#define WQ_HYSTERESIS_FACTOR 4

	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with yielded check, provides hysteresis for
		 * granularity */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
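
	/* Worked example (illustrative) of the ramp-up above: with
	 * granularity 10, a run of 450 cycles exceeds 10 * 4 * 4 = 160,
	 * so granularity jumps to 40; a run of 50 cycles only exceeds
	 * 10 * 4 = 40, so granularity creeps up to 14.
	 */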
#undef WQ_HYSTERESIS_FACTOR

	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;

#if 0
	printf("%s: cycles %d, new: best %d, worst %d\n",
	       __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

	/* Is the queue done yet? If it is, call the completion callback. */
	if (listcount(wq->items) > 0)
		work_queue_schedule(wq, 0);
	else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);

	return 0;
}
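
/* Example workfunc (illustrative sketch; all names are hypothetical),
 * showing how the wq_item_status return codes drive the run loop above:
 *
 *   static wq_item_status example_process_item(struct work_queue *wq,
 *                                              void *data)
 *   {
 *       struct example_item *it = data;
 *
 *       if (!example_resource_ready(it))
 *           return WQ_RETRY_LATER;  transient: retried on a later run
 *       if (!example_process(it))
 *           return WQ_ERROR;        spec.errorfunc runs, item is removed
 *       return WQ_SUCCESS;          item is removed from the queue
 *   }
 */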