/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ((new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}

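/* Typical usage, as an illustrative sketch only: the item type, callback
 * names and memory type below are hypothetical; the work_queue calls and
 * spec fields are the ones defined here and in workqueue.h.
 *
 *   static wq_item_status
 *   my_workfunc (struct work_queue *wq, void *data)
 *   {
 *     struct my_item *mi = data;
 *     ... process one unit of work ...
 *     return WQ_SUCCESS;    (or WQ_RETRY_LATER, WQ_REQUEUE, WQ_ERROR, ...)
 *   }
 *
 *   static void
 *   my_del_item_data (struct work_queue *wq, void *data)
 *   {
 *     XFREE (MTYPE_TMP, data);
 *   }
 *
 *   struct work_queue *wq = work_queue_new (master, "hypothetical queue");
 *   wq->spec.workfunc = &my_workfunc;
 *   wq->spec.del_item_data = &my_del_item_data;
 *   wq->spec.max_retries = 3;
 *
 *   work_queue_add (wq, mi);    with mi a pointer to one struct my_item;
 *                               each add schedules the queue's background
 *                               thread if it is not already scheduled
 */
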
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if (CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
      && (wq->thread == NULL)
      && (listcount (wq->items) > 0))
    {
      wq->thread = thread_add_background (wq->master, work_queue_run, wq,
                                          delay, NULL);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}

void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

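/* What the workfunc return values mean to the run loop in work_queue_run
 * below (summarised from the switch statement there):
 *   WQ_SUCCESS       - item is done, remove it from the queue
 *   WQ_RETRY_NOW     - retry immediately, up to spec.max_retries attempts
 *   WQ_RETRY_LATER   - stop this run; the item stays queued for the next run
 *   WQ_QUEUE_BLOCKED - as WQ_RETRY_LATER, but the attempt is not counted
 *                      against the item's retries
 *   WQ_REQUEUE       - move the item to the tail of the queue and continue
 *   WQ_ERROR         - call spec.errorfunc, if set, then remove the item
 */
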
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

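/* CLI "show work-queues". The columns, per the format strings below, are:
 *   P            - 'P' if the queue is currently plugged, blank otherwise
 *   Items        - number of items currently on the queue
 *   Hold (ms)    - spec.hold, the scheduling hold time
 *   Q. Runs      - total number of work_queue_run invocations
 *   Yields       - total number of runs that ended in a yield
 *   Cycle Counts - best, granularity, total, and average (total / runs)
 *                  items processed per run
 *   Name         - the queue's name
 */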
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %8s %21s%s",
           ' ', "List", "(ms) ", "Q. Runs", "Yields", "Cycle Counts   ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total", "Total",
           "Best", "Gran.", "Total", "Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs, wq->yields,
               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}

void
workqueue_cmd_init (void)
{
  install_element (VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate,
 * i.e. allow the queue to be drained again.
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}

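/* A common pattern, sketched with hypothetical loop and item names: plug the
 * queue while batching up related items, then unplug so they drain in one
 * run rather than one run per item.
 *
 *   work_queue_plug (wq);            queue is held, nothing gets scheduled
 *   for (... each pending change ...)
 *     work_queue_add (wq, item);     items accumulate on the queue
 *   work_queue_unplug (wq);          reschedules the queue, which now drains
 */
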
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise the next work_queue_add will schedule it again
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);

  /* calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise
   *              by a small factor.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure. Average won't shift much if
   * daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

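  /* Illustrative trace of the adaptation, with hypothetical numbers and the
   * WQ_HYSTERESIS_FACTOR of 4 used in the stats block at the end of this
   * function:
   *   - granularity starts at 1; a run that completes 100 cycles without
   *     yielding satisfies 100 > 1 * 4 * 4, so granularity ramps up to 4,
   *     and to 16 after another similar run.
   *   - at granularity 16, a 100-cycle run only satisfies 100 > 16 * 4, so
   *     granularity creeps up to 20.
   *   - if a later run yields after only 12 cycles, granularity is cut back
   *     to 12, so the yield check then happens more frequently.
   */
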
  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
  {
    assert (item && item->data);

    /* don't run items which are past their allowed retries */
    if (item->ran > wq->spec.max_retries)
      {
        /* run error handler, if any */
        if (wq->spec.errorfunc)
          wq->spec.errorfunc (wq, item->data);
        work_queue_item_remove (wq, node);
        continue;
      }

    /* run and take care of items that want to be retried immediately */
    do
      {
        ret = wq->spec.workfunc (wq, item->data);
        item->ran++;
      }
    while ((ret == WQ_RETRY_NOW)
           && (item->ran < wq->spec.max_retries));

    switch (ret)
      {
      case WQ_QUEUE_BLOCKED:
        {
          /* decrement item->ran again, because this isn't an item
           * specific error, and fall through to WQ_RETRY_LATER
           */
          item->ran--;
        }
      case WQ_RETRY_LATER:
        {
          goto stats;
        }
      case WQ_REQUEUE:
        {
          item->ran--;
          work_queue_item_requeue (wq, node);
          /* If a single node is being used with a meta-queue (e.g., zebra),
           * update the next node as we don't want to exit the thread and
           * reschedule it after every node. By definition, WQ_REQUEUE is
           * meant to continue the processing; the yield logic will kick in
           * to terminate the thread when time has exceeded.
           */
          if (nnode == NULL)
            nnode = node;
          break;
        }
      case WQ_RETRY_NOW:
        /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
      case WQ_ERROR:
        {
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item);
        }
        /* fall through here is deliberate */
      case WQ_SUCCESS:
      default:
        {
          work_queue_item_remove (wq, node);
          break;
        }
      }

    /* completed cycle */
    cycles++;

    /* test if we should yield */
    if (!(cycles % wq->cycles.granularity)
        && thread_should_yield (thread))
      {
        yielded = 1;
        goto stats;
      }
  }

stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}