/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}
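
/* Illustrative only: a minimal sketch of how a caller might create, configure
 * and feed a work queue with this API.  The callback names (example_process,
 * example_free), example_setup and the allocated dummy item are hypothetical;
 * the spec fields and calls are the ones used elsewhere in this file.
 */
#if 0
static wq_item_status
example_process (struct work_queue *wq, void *data)
{
  /* do one unit of work on 'data', then report the outcome */
  return WQ_SUCCESS;
}

static void
example_free (struct work_queue *wq, void *data)
{
  /* release per-item data once the queue is done with it */
  XFREE (MTYPE_TMP, data);
}

static void
example_setup (struct thread_master *master)
{
  struct work_queue *wq = work_queue_new (master, "example queue");
  void *data = XCALLOC (MTYPE_TMP, 16);   /* stand-in for real item data */

  wq->spec.workfunc = example_process;
  wq->spec.del_item_data = example_free;
  wq->spec.max_retries = 3;
  wq->spec.hold = WORK_QUEUE_DEFAULT_HOLD;

  /* queue the item; work_queue_add schedules the background thread */
  work_queue_add (wq, data);
}
#endif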

void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel(wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}

void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %8s %21s%s",
           ' ', "List", "(ms) ", "Q. Runs", "Yields", "Cycle Counts ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total", "Total",
           "Best", "Gran.", "Total", "Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs, wq->yields,
               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}

/* 'Plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 * (See the usage sketch after work_queue_unplug below.)
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* Unplug a queue and schedule it again, if appropriate,
 * i.e. allow the queue to be drained again.
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if a thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
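
/* Illustrative only: the plug/unplug pair lets a caller batch many additions
 * and have the queue drained once at the end.  The helper below and its
 * arguments are hypothetical; 'wq' is assumed to come from a setup like the
 * sketch after work_queue_new above.
 */
#if 0
static void
example_batch_add (struct work_queue *wq, void **items, unsigned int n)
{
  unsigned int i;

  work_queue_plug (wq);        /* suspend scheduling while we enqueue */

  for (i = 0; i < n; i++)
    work_queue_add (wq, items[i]);

  work_queue_unplug (wq);      /* allow the queue to drain again */
}
#endif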

/* Timer thread to process a work queue.  It will reschedule itself if
 * required; otherwise work_queue_add will schedule it again when new
 * items arrive.
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);

  /* Calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * Granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase.
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, is decreased
   *              if we run to the end of the time slot, and otherwise
   *              increased by a small factor.
   *
   * We could use just the average and save some work, but we want to be
   * able to adjust quickly to CPU pressure.  The average won't shift much
   * once the daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an
             * item-specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            item->ran--;
            work_queue_item_requeue (wq, node);
            /* If a single node is being used with a meta-queue (e.g., zebra),
             * update the next node as we don't want to exit the thread and
             * reschedule it after every node.  By definition, WQ_REQUEUE is
             * meant to continue the processing; the yield logic will kick in
             * to terminate the thread when time has exceeded.
             */
            if (nnode == NULL)
              nnode = node;
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
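  /* Worked example (editor's illustration, not part of the original code):
   * with granularity 10 and WQ_HYSTERESIS_FACTOR 4, a run of more than 160
   * cycles quadruples granularity to 40; a run of 41-160 cycles only nudges
   * it to 14; a run that yielded after fewer than 10 cycles drops
   * granularity to that cycle count (or the minimum of 1).
   */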
#undef WQ_HYSTERESIS_FACTOR

  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, granularity %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}