/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;
	assert(wq);

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}

static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}

static void work_queue_item_remove(struct work_queue *wq,
				   struct work_queue_item *item)
{
	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	work_queue_item_dequeue(wq, item);

	work_queue_item_free(item);
}

/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	if (new == NULL)
		return new;

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	STAILQ_INIT(&new->items);

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;

	return new;
}
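
/* Illustrative usage sketch, not part of this file's API surface;
 * "master", "my_workfunc" and "my_del_item_data" are hypothetical
 * caller-supplied names:
 *
 *	struct work_queue *wq = work_queue_new(master, "example queue");
 *
 *	wq->spec.workfunc = my_workfunc;	    // processes one item's data
 *	wq->spec.del_item_data = my_del_item_data;  // frees data on dequeue
 *	wq->spec.max_retries = 3;
 *
 *	work_queue_add(wq, item_data);	// schedules a run after spec.hold msec
 */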

void work_queue_free_original(struct work_queue *wq)
{
	if (wq->thread != NULL)
		thread_cancel(wq->thread);

	while (!work_queue_empty(wq)) {
		struct work_queue_item *item = work_queue_last_item(wq);

		work_queue_item_remove(wq, item);
	}

	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);
}

void work_queue_free_and_null(struct work_queue **wq)
{
	work_queue_free_original(*wq);
	*wq = NULL;
}

bool work_queue_is_scheduled(struct work_queue *wq)
{
	return (wq->thread != NULL);
}

static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) && (wq->thread == NULL)
	    && !work_queue_empty(wq)) {
		wq->thread = NULL;
		thread_add_timer_msec(wq->master, work_queue_run, wq, delay,
				      &wq->thread);
		/* set thread yield time, if needed */
		if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);
		return 1;
	} else
		return 0;
}

void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	if (!(item = work_queue_item_new(wq))) {
		zlog_err("%s: unable to get new queue item", __func__);
		return;
	}

	item->data = data;
	work_queue_item_enqueue(wq, item);

	work_queue_schedule(wq, wq->spec.hold);
}

static void work_queue_item_requeue(struct work_queue *wq,
				    struct work_queue_item *item)
{
	work_queue_item_dequeue(wq, item);

	/* attach to end of list */
	work_queue_item_enqueue(wq, item);
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			work_queue_item_count(wq), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}

void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	if (wq->thread)
		thread_cancel(wq->thread);

	wq->thread = NULL;

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate,
 * i.e. allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
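
/* Illustrative batching sketch ("build_update" is a hypothetical caller
 * helper): plugging suppresses scheduling while many items are enqueued,
 * and the single unplug then lets the queue drain in one scheduled run:
 *
 *	work_queue_plug(wq);
 *	for (i = 0; i < n; i++)
 *		work_queue_add(wq, build_update(i));
 *	work_queue_unplug(wq);	// reschedules after spec.hold msec
 */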

/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise it is rescheduled by work_queue_add when new items arrive
 */
int work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item, *titem;
	wq_item_status ret;
	unsigned int cycles = 0;
	char yielded = 0;

	wq = THREAD_ARG(thread);

	assert(wq);

	wq->thread = NULL;

	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run to
	 * provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want
	 * to be able to adjust quickly to CPU pressure. Average won't shift
	 * much if the daemon has been running a long time.
	 */
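	/* Worked example of the adaptation below (a sketch; the factor is
	 * WQ_HYSTERESIS_FACTOR == 4): with granularity 10, a run completing
	 * 45 cycles without yielding exceeds 10 * 4, so granularity grows to
	 * 14; a run completing more than 10 * 4 * 4 = 160 cycles would
	 * instead quadruple it to 40. The reduction branch resets granularity
	 * to the cycle count of a run that yielded early.
	 */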
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	STAILQ_FOREACH_SAFE (item, &wq->items, wq, titem) {
		assert(item && item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			/* run error handler, if any */
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item->data);
			work_queue_item_remove(wq, item);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, because this isn't an
			 * item-specific error, and fall through to
			 * WQ_RETRY_LATER
			 */
			item->ran--;
		}
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, item);
			/* If a single node is being used with a meta-queue
			 * (e.g., zebra), update the next node as we don't
			 * want to exit the thread and reschedule it after
			 * every node. By definition, WQ_REQUEUE is meant to
			 * continue the processing; the yield logic will kick
			 * in to terminate the thread when time has exceeded.
			 */
			if (titem == NULL)
				titem = item;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_retries, same
		 * as ERROR */
		case WQ_ERROR: {
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
		}
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, item);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:

#define WQ_HYSTERESIS_FACTOR 4

	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with the yielded check, provides hysteresis for
		 * granularity
		 */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
#undef WQ_HYSTERESIS_FACTOR

	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;

#if 0
	printf("%s: cycles %d, new: best %d, granularity %d\n",
	       __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

	/* Is the queue done yet? If it is, call the completion callback. */
	if (!work_queue_empty(wq))
		work_queue_schedule(wq, 0);
	else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);

	return 0;
}