/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;
	assert(wq);

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}

static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
	return;
}

static void work_queue_item_remove(struct work_queue *wq,
				   struct work_queue_item *item)
{
	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	work_queue_item_dequeue(wq, item);

	work_queue_item_free(item);

	return;
}

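/* Hedged illustration (not part of this library): the del_item_data hook
 * called above is where a queue owner reclaims whatever it attached to
 * item->data.  The struct and function names below are hypothetical.
 *
 *	static void my_del_item_data(struct work_queue *wq, void *data)
 *	{
 *		struct my_item *mi = data;
 *
 *		XFREE(MTYPE_TMP, mi);
 *	}
 *
 * A queue that sets wq->spec.del_item_data this way can enqueue
 * heap-allocated data without leaking it when items are removed or when
 * the whole queue is freed.
 */
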
/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	STAILQ_INIT(&new->items);

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;
	new->spec.retry = WORK_QUEUE_DEFAULT_RETRY;

	return new;
}

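/* Hedged usage sketch, assuming a typical daemon built on this library:
 * create the queue, fill in the spec callbacks, then feed it with
 * work_queue_add().  The callback and context names are illustrative only.
 *
 *	static wq_item_status my_workfunc(struct work_queue *wq, void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		... process one unit of work ...
 *		return WQ_SUCCESS;	// or WQ_RETRY_LATER, WQ_REQUEUE, ...
 *	}
 *
 *	wq = work_queue_new(master, "my queue");
 *	wq->spec.workfunc = my_workfunc;
 *	wq->spec.del_item_data = my_del_item_data;
 *	wq->spec.max_retries = 3;
 *	// spec.hold, spec.yield and spec.retry keep the defaults set above
 *	// unless the caller overrides them.
 */
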
void work_queue_free_and_null(struct work_queue **wqp)
{
	struct work_queue *wq = *wqp;

	if (wq->thread != NULL)
		thread_cancel(&(wq->thread));

	while (!work_queue_empty(wq)) {
		struct work_queue_item *item = work_queue_last_item(wq);

		work_queue_item_remove(wq, item);
	}

	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);

	*wqp = NULL;
}

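/* Usage note: because the destructor takes a double pointer, the caller's
 * handle is cleared for it, e.g.
 *
 *	work_queue_free_and_null(&wq);
 *	assert(wq == NULL);
 */
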
bool work_queue_is_scheduled(struct work_queue *wq)
{
	return (wq->thread != NULL);
}

static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) && (wq->thread == NULL)
	    && !work_queue_empty(wq)) {
		wq->thread = NULL;

		/* Schedule timer if there's a delay, otherwise just schedule
		 * as an 'event'
		 */
		if (delay > 0)
			thread_add_timer_msec(wq->master, work_queue_run, wq,
					      delay, &wq->thread);
		else
			thread_add_event(wq->master, work_queue_run, wq, 0,
					 &wq->thread);

		/* set thread yield time, if needed */
		if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);
		return 1;
	} else
		return 0;
}

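/* For reference, the delays used with work_queue_schedule() elsewhere in
 * this file: work_queue_add() and work_queue_unplug() schedule with
 * spec.hold; work_queue_run() reschedules with spec.retry after
 * WQ_RETRY_LATER / WQ_QUEUE_BLOCKED, and with 0 (a plain event) otherwise.
 */
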
void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	item = work_queue_item_new(wq);

	item->data = data;
	work_queue_item_enqueue(wq, item);

	work_queue_schedule(wq, wq->spec.hold);

	return;
}

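/* Hedged illustration: callers usually add one item per unit of pending
 * work; the hold timer requested above lets several adds made within
 * spec.hold milliseconds be drained by a single work_queue_run() pass.
 * "ctx" is hypothetical:
 *
 *	for (i = 0; i < n; i++)
 *		work_queue_add(wq, ctx[i]);
 */
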
static void work_queue_item_requeue(struct work_queue *wq,
				    struct work_queue_item *item)
{
	work_queue_item_dequeue(wq, item);

	/* attach to end of list */
	work_queue_item_enqueue(wq, item);
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			work_queue_item_count(wq), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}

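/* Illustrative only: with the format strings above, "show work-queues"
 * output looks roughly like this (all figures and the queue name are
 * hypothetical):
 *
 *       List  (ms)   Q. Runs   Yields          Cycle Counts
 *  P   Items  Hold     Total    Total    Best  Gran.    Total   Avg.  Name
 *          0    10       427        3      57     25    12000     28  example
 */
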
void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	if (wq->thread)
		thread_cancel(&(wq->thread));

	wq->thread = NULL;

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate,
 * i.e. allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if a thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}

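/* Hedged usage sketch: plug/unplug bracket a burst of updates so the queue
 * is drained once at the end rather than trickling out while it is still
 * being filled.  Purely illustrative:
 *
 *	work_queue_plug(wq);
 *	for (i = 0; i < n; i++)
 *		work_queue_add(wq, ctx[i]);
 *	work_queue_unplug(wq);
 */
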
/* timer thread to process a work queue:
 * will reschedule itself if required,
 * otherwise it is scheduled again by the next work_queue_add()
 */
int work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item, *titem;
	wq_item_status ret = WQ_SUCCESS;
	unsigned int cycles = 0;
	char yielded = 0;

	wq = THREAD_ARG(thread);

	assert(wq);

	wq->thread = NULL;

	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run to
	 * provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *		if we run to end of time slot, can increase otherwise
	 *		by a small factor.
	 *
	 * We could use just the average and save some work, however we want to
	 * be able to adjust quickly to CPU pressure. Average won't shift much
	 * if the daemon has been running a long time.
	 */
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	STAILQ_FOREACH_SAFE (item, &wq->items, wq, titem) {
		assert(item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			/* run error handler, if any */
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
			work_queue_item_remove(wq, item);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, because this isn't an
			 * item-specific error, and fall through to
			 * WQ_RETRY_LATER
			 */
			item->ran--;
		}
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, item);
			/* If a single node is being used with a meta-queue
			 * (e.g., zebra), update the next node as we don't want
			 * to exit the thread and reschedule it after every
			 * node. By definition, WQ_REQUEUE is meant to continue
			 * the processing; the yield logic will kick in to
			 * terminate the thread when time has exceeded.
			 */
			if (titem == NULL)
				titem = item;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_retries, same as
		 * ERROR */
		case WQ_ERROR: {
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
		}
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, item);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:

#define WQ_HYSTERESIS_FACTOR 4

	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with the yielded check, provides hysteresis for
		 * granularity
		 */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
#undef WQ_HYSTERESIS_FACTOR
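
	/* Worked example of the adjustment above (illustrative numbers):
	 * with granularity == 10 and WQ_HYSTERESIS_FACTOR == 4, a run of
	 * more than 160 cycles quadruples granularity to 40, a run of
	 * 41..160 cycles nudges it up to 14, and a run that yielded after
	 * fewer than 10 cycles drops granularity to that cycle count.
	 */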

	wq->runs++;
	wq->cycles.total += cycles;
	if (yielded)
		wq->yields++;

#if 0
	printf ("%s: cycles %d, new: best %d, worst %d\n",
		__func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

	/* Is the queue done yet? If it is, call the completion callback. */
	if (!work_queue_empty(wq)) {
		if (ret == WQ_RETRY_LATER || ret == WQ_QUEUE_BLOCKED)
			work_queue_schedule(wq, wq->spec.retry);
		else
			work_queue_schedule(wq, 0);

	} else if (wq->spec.completion_func)
		wq->spec.completion_func(wq);

	return 0;
}