/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE       (LIB, WORK_QUEUE,      "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")
/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}
/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ((new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}
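/* Example usage (an illustrative sketch, not part of this library): a
 * daemon typically creates its queue once at startup and then fills in
 * the spec hooks. The names "example_master", "example_process_item" and
 * "example_del_item" below are hypothetical placeholders.
 *
 *   struct work_queue *wq = work_queue_new (example_master, "example queue");
 *   wq->spec.workfunc      = &example_process_item;  // required
 *   wq->spec.del_item_data = &example_del_item;      // optional
 *   wq->spec.max_retries   = 3;
 *   wq->spec.hold          = 100;  // override WORK_QUEUE_DEFAULT_HOLD
 */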
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}
bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}
static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if (CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
      && (wq->thread == NULL)
      && (listcount (wq->items) > 0))
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}
void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}
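/* Example workfunc (a hypothetical sketch): work_queue_run () calls this
 * hook once per item and acts on the returned wq_item_status: WQ_SUCCESS
 * dequeues the item, WQ_RETRY_LATER ends this run and relies on the hold
 * timer to retry, WQ_REQUEUE moves the item to the tail of the queue.
 * "example_ctx" and the helper functions are placeholders, not real symbols.
 *
 *   static wq_item_status
 *   example_process_item (struct work_queue *wq, void *data)
 *   {
 *     struct example_ctx *ctx = data;
 *
 *     if (!example_resource_ready (ctx))
 *       return WQ_RETRY_LATER;
 *
 *     example_do_work (ctx);
 *     return WQ_SUCCESS;
 *   }
 *
 *   work_queue_add (wq, ctx);
 */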
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}
static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %8s %21s%s",
           ' ', "List", "(ms) ", "Q. Runs", "Yields", "Cycle Counts   ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total", "Total",
           "Best", "Gran.", "Total", "Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs, wq->yields,
               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}
/* 'plug' a queue: Stop it from being scheduled,
 * ie: prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, schedule it again, if appropriate
 * ie: Allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
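/* Example (illustrative): plug/unplug can bracket a burst of additions so
 * the queue accumulates work and drains in one scheduled run, rather than
 * starting to drain after the first item is added:
 *
 *   work_queue_plug (wq);             // stop the queue being scheduled
 *   for (i = 0; i < n; i++)
 *     work_queue_add (wq, data[i]);   // items pile up, nothing runs yet
 *   work_queue_unplug (wq);           // schedule a run to drain them
 */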
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add () will schedule a run when
 * items are next enqueued.
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);

  /* calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure. Average won't shift much if
   * daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an item
             * specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            /* If a single node is being used with a meta-queue (e.g., zebra),
             * update the next node as we don't want to exit the thread and
             * reschedule it after every node. By definition, WQ_REQUEUE is
             * meant to continue the processing; the yield logic will kick in
             * to terminate the thread when time has exceeded.
             */
            if (!nnode && item == listgetdata (listtail (wq->items)))
              nnode = node;
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if (!(cycles % wq->cycles.granularity)
          && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:
#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR
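/* Worked example of the adjustment above: with granularity 10 and
 * WQ_HYSTERESIS_FACTOR 4, a run of 200 cycles exceeds 10 * 4 * 4 = 160,
 * so granularity ramps up to 40; a run of 50 cycles exceeds only
 * 10 * 4 = 40, so granularity creeps to 14. A yielded run of, say,
 * 6 cycles would instead cut granularity straight down to 6.
 */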
  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif
  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}