/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}

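/* A minimal usage sketch (illustrative only: the callback and variable names
 * below are hypothetical, not part of this library). A caller provides a
 * workfunc that processes one item's data per cycle and returns a
 * wq_item_status (WQ_SUCCESS, WQ_RETRY_LATER, WQ_REQUEUE, WQ_ERROR, etc.),
 * then hands data pointers to work_queue_add, which schedules the queue to
 * run after spec.hold milliseconds:
 *
 *   static wq_item_status
 *   example_workfunc (struct work_queue *wq, void *data)
 *   {
 *     example_process_one (data);
 *     return WQ_SUCCESS;
 *   }
 *
 *   struct work_queue *wq = work_queue_new (master, "example queue");
 *   wq->spec.workfunc = &example_workfunc;
 *   wq->spec.del_item_data = &example_del_data;
 *   wq->spec.max_retries = 3;
 *   work_queue_add (wq, some_data);
 */
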
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel(wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = NULL;
      thread_add_background(wq->master, work_queue_run, wq, delay,
                            &wq->thread);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}

void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

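/* Legend for the 'show work-queues' output below: a leading 'P' flags a
 * plugged (non-draining) queue; Items is the current queue depth; Hold is
 * spec.hold in milliseconds; the Runs and Yields columns are lifetime totals;
 * the cycle counts show the best run, the current granularity, the lifetime
 * total and the average cycles per run.
 */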
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %8s %21s%s",
           ' ', "List","(ms) ","Q. Runs","Yields","Cycle Counts ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total","Total",
           "Best","Gran.","Total","Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty,"%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs, wq->yields,
               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}

void
workqueue_cmd_init (void)
{
  install_element (VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate,
 * i.e. allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if a thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}

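/* A typical batching pattern (sketch; the queue and item variables are
 * hypothetical): plug the queue while enqueueing a burst of related work so
 * nothing is drained half-way through, then unplug it to let processing
 * resume:
 *
 *   work_queue_plug (wq);
 *   for (i = 0; i < num_updates; i++)
 *     work_queue_add (wq, updates[i]);
 *   work_queue_unplug (wq);
 */
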
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise it is rescheduled from work_queue_add when new work arrives
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);

  /* calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise
   *              by a small factor.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure. The average won't shift much if
   * the daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an item
             * specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            /* If a single node is being used with a meta-queue (e.g., zebra),
             * update the next node as we don't want to exit the thread and
             * reschedule it after every node. By definition, WQ_REQUEUE is
             * meant to continue the processing; the yield logic will kick in
             * to terminate the thread when time has exceeded.
             */
            if (nnode == NULL)
              nnode = node;
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERESIS_FACTOR 4

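  /* Worked example of the adjustment below, with WQ_HYSTERESIS_FACTOR == 4:
   * starting from granularity 10, a run of 200 cycles exceeds 10 * 4 * 4 = 160,
   * so granularity is multiplied up to 40; a later run of 200 cycles exceeds
   * only 40 * 4 = 160, so granularity creeps up to 44; and if we then yield
   * after just 30 cycles, granularity is cut back down to 30.
   */
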
  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}