/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}
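
/* Example usage (illustrative sketch only, kept out of the build with
 * "#if 0"): how a daemon might create a queue, override the defaults
 * set above, and enqueue an item.  The example_* names and the item
 * data below are hypothetical, not part of this library.
 */
#if 0
static wq_item_status
example_workfunc (struct work_queue *wq, void *data)
{
  /* process one queued item here */
  return WQ_SUCCESS;
}

static void
example_setup (struct thread_master *master)
{
  struct work_queue *wq = work_queue_new (master, "example queue");
  void *item_data = NULL;       /* would point at per-item state */

  wq->spec.workfunc = &example_workfunc;
  wq->spec.max_retries = 3;     /* retry a failing item a few times */
  wq->spec.hold = 10;           /* ms to wait before the first run */

  /* adding an item schedules the queue's background thread */
  work_queue_add (wq, item_data);
}
#endif /* 0 */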

void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = NULL;
      thread_add_timer_msec (wq->master, work_queue_run, wq, delay,
                             &wq->thread);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}

void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_outln (vty,
             "%c %8s %5s %8s %8s %21s",
             ' ', "List", "(ms) ", "Q. Runs", "Yields", "Cycle Counts ");
  vty_outln (vty,
             "%c %8s %5s %8s %8s %7s %6s %8s %6s %s",
             'P',
             "Items",
             "Hold",
             "Total", "Total",
             "Best", "Gran.", "Total", "Avg.",
             "Name");

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_outln (vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s",
                 (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
                 listcount (wq->items),
                 wq->spec.hold,
                 wq->runs, wq->yields,
                 wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
                 (wq->runs) ?
                   (unsigned int) (wq->cycles.total / wq->runs) : 0,
                 wq->name);
    }

  return CMD_SUCCESS;
}

void
workqueue_cmd_init (void)
{
  install_element (VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug a queue, scheduling it again if appropriate,
 * i.e. allow the queue to be drained again.
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
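
/* Example (illustrative sketch only, not compiled): plugging a queue
 * around a burst of additions batches the work into a single later run
 * instead of scheduling after the first item.  fill_queue and wq are
 * hypothetical names.
 */
#if 0
  work_queue_plug (wq);         /* queue cannot be scheduled...        */
  fill_queue (wq);              /* ...while many items are added...    */
  work_queue_unplug (wq);       /* ...then one run drains them all     */
#endif /* 0 */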

/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise it is scheduled again from work_queue_add when new items arrive
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);

  /* calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise
   *              by a small factor.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure.  The average won't shift much if
   * the daemon has been running a long time.
   */
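  /* Worked example of the adjustment done after the "stats" label below
   * (numbers are illustrative only): with granularity 10 and
   * WQ_HYSTERESIS_FACTOR 4, a run of more than 160 cycles (10 * 4 * 4)
   * multiplies granularity by 4 (quick ramp-up), a run of more than 40
   * but at most 160 cycles adds 4, and a run that had to yield after
   * fewer than 10 cycles drops granularity to that cycle count (never
   * below WORK_QUEUE_MIN_GRANULARITY).
   */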
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

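      /* Dispatch on the status returned by workfunc: BLOCKED and
       * RETRY_LATER end this run, REQUEUE moves the item to the tail of
       * the list, a RETRY_NOW falling out of the loop above has exhausted
       * its retries and is treated like ERROR, and SUCCESS (the default)
       * removes the item.
       */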
      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an
             * item-specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            /* If a single node is being used with a meta-queue (e.g., zebra),
             * update the next node as we don't want to exit the thread and
             * reschedule it after every node.  By definition, WQ_REQUEUE is
             * meant to continue the processing; the yield logic will kick in
             * to terminate the thread when time has exceeded.
             */
            if (nnode == NULL)
              nnode = node;
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
          && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}