/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *work_queue_item_new(struct work_queue *wq)
{
	struct work_queue_item *item;
	assert(wq);

	item = XCALLOC(MTYPE_WORK_QUEUE_ITEM, sizeof(struct work_queue_item));

	return item;
}

static void work_queue_item_free(struct work_queue_item *item)
{
	XFREE(MTYPE_WORK_QUEUE_ITEM, item);
}

/* create new work queue */
struct work_queue *work_queue_new(struct thread_master *m,
				  const char *queue_name)
{
	struct work_queue *new;

	new = XCALLOC(MTYPE_WORK_QUEUE, sizeof(struct work_queue));

	if (new == NULL)
		return new;

	new->name = XSTRDUP(MTYPE_WORK_QUEUE_NAME, queue_name);
	new->master = m;
	SET_FLAG(new->flags, WQ_UNPLUGGED);

	if ((new->items = list_new()) == NULL) {
		XFREE(MTYPE_WORK_QUEUE_NAME, new->name);
		XFREE(MTYPE_WORK_QUEUE, new);

		return NULL;
	}

	new->items->del = (void (*)(void *))work_queue_item_free;

	listnode_add(work_queues, new);

	new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	/* Default values, can be overridden by caller */
	new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
	new->spec.yield = THREAD_YIELD_TIME_SLOT;

	return new;
}
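
/* Typical caller setup (an illustrative sketch; the callback names here are
 * hypothetical, not part of this library):
 *
 *   struct work_queue *wq = work_queue_new(master, "example queue");
 *
 *   wq->spec.workfunc = &example_process_item;   mandatory per-item handler
 *   wq->spec.del_item_data = &example_del_item;  optional item destructor
 *   wq->spec.max_retries = 3;
 *   wq->spec.hold = 50;                          msec delay before first run
 *
 *   work_queue_add(wq, item_data);
 */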

void work_queue_free(struct work_queue *wq)
{
	if (wq->thread != NULL)
		thread_cancel(wq->thread);

	/* list_delete frees items via callback */
	list_delete(wq->items);
	listnode_delete(work_queues, wq);

	XFREE(MTYPE_WORK_QUEUE_NAME, wq->name);
	XFREE(MTYPE_WORK_QUEUE, wq);
}

bool work_queue_is_scheduled(struct work_queue *wq)
{
	return (wq->thread != NULL);
}

static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
{
	/* if appropriate, schedule work queue thread */
	if (CHECK_FLAG(wq->flags, WQ_UNPLUGGED) && (wq->thread == NULL)
	    && (listcount(wq->items) > 0)) {
		wq->thread = NULL;
		thread_add_timer_msec(wq->master, work_queue_run, wq, delay,
				      &wq->thread);
		/* set thread yield time, if needed */
		if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
			thread_set_yield_time(wq->thread, wq->spec.yield);
		return 1;
	} else
		return 0;
}

void work_queue_add(struct work_queue *wq, void *data)
{
	struct work_queue_item *item;

	assert(wq);

	if (!(item = work_queue_item_new(wq))) {
		zlog_err("%s: unable to get new queue item", __func__);
		return;
	}

	item->data = data;
	listnode_add(wq->items, item);

	work_queue_schedule(wq, wq->spec.hold);
}

static void work_queue_item_remove(struct work_queue *wq, struct listnode *ln)
{
	struct work_queue_item *item = listgetdata(ln);

	assert(item && item->data);

	/* call private data deletion callback if needed */
	if (wq->spec.del_item_data)
		wq->spec.del_item_data(wq, item->data);

	list_delete_node(wq->items, ln);
	work_queue_item_free(item);
}

static void work_queue_item_requeue(struct work_queue *wq, struct listnode *ln)
{
	LISTNODE_DETACH(wq->items, ln);
	LISTNODE_ATTACH(wq->items, ln); /* attach to end of list */
}

DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
	struct listnode *node;
	struct work_queue *wq;

	vty_out(vty, "%c %8s %5s %8s %8s %21s\n", ' ', "List", "(ms) ",
		"Q. Runs", "Yields", "Cycle Counts ");
	vty_out(vty, "%c %8s %5s %8s %8s %7s %6s %8s %6s %s\n", 'P', "Items",
		"Hold", "Total", "Total", "Best", "Gran.", "Total", "Avg.",
		"Name");

	for (ALL_LIST_ELEMENTS_RO(work_queues, node, wq)) {
		vty_out(vty, "%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s\n",
			(CHECK_FLAG(wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
			listcount(wq->items), wq->spec.hold, wq->runs,
			wq->yields, wq->cycles.best, wq->cycles.granularity,
			wq->cycles.total,
			(wq->runs) ? (unsigned int)(wq->cycles.total / wq->runs)
				   : 0,
			wq->name);
	}

	return CMD_SUCCESS;
}

void workqueue_cmd_init(void)
{
	install_element(VIEW_NODE, &show_work_queues_cmd);
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e., prevent the queue from draining.
 */
void work_queue_plug(struct work_queue *wq)
{
	if (wq->thread)
		thread_cancel(wq->thread);

	wq->thread = NULL;

	UNSET_FLAG(wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again if appropriate,
 * i.e., allow the queue to be drained again
 */
void work_queue_unplug(struct work_queue *wq)
{
	SET_FLAG(wq->flags, WQ_UNPLUGGED);

	/* if thread isn't already waiting, add one */
	work_queue_schedule(wq, wq->spec.hold);
}
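
/* Illustrative plug/unplug usage (hypothetical caller code, not part of
 * this file): batch up several additions without letting the queue drain
 * between them.
 *
 *   work_queue_plug(wq);
 *   for (i = 0; i < n; i++)
 *           work_queue_add(wq, items[i]);
 *   work_queue_unplug(wq);
 *
 * While plugged, work_queue_add() still enqueues items, but
 * work_queue_schedule() declines to start the background thread until
 * work_queue_unplug() sets WQ_UNPLUGGED again.
 */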

/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add() will schedule it again when new work arrives
 */
int work_queue_run(struct thread *thread)
{
	struct work_queue *wq;
	struct work_queue_item *item;
	wq_item_status ret;
	unsigned int cycles = 0;
	struct listnode *node, *nnode;
	char yielded = 0;

	wq = THREAD_ARG(thread);
	assert(wq && wq->items);

	wq->thread = NULL;

	/* calculate cycle granularity:
	 * list iteration == 1 run
	 * listnode processing == 1 cycle
	 * granularity == # cycles between checks whether we should yield.
	 *
	 * granularity should be > 0, and can increase slowly after each run
	 * to provide some hysteresis, but not past cycles.best or 2*cycles.
	 *
	 * Best: starts low, can only increase
	 *
	 * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
	 *              if we run to end of time slot, can increase otherwise
	 *              by a small factor.
	 *
	 * We could use just the average and save some work, however we want
	 * to be able to adjust quickly to CPU pressure.  Average won't shift
	 * much if the daemon has been running a long time.
	 */
	if (wq->cycles.granularity == 0)
		wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

	for (ALL_LIST_ELEMENTS(wq->items, node, nnode, item)) {
		assert(item && item->data);

		/* don't run items which are past their allowed retries */
		if (item->ran > wq->spec.max_retries) {
			/* run error handler, if any.  Pass the whole item,
			 * consistent with the WQ_ERROR case below.
			 */
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
			work_queue_item_remove(wq, node);
			continue;
		}

		/* run and take care of items that want to be retried
		 * immediately
		 */
		do {
			ret = wq->spec.workfunc(wq, item->data);
			item->ran++;
		} while ((ret == WQ_RETRY_NOW)
			 && (item->ran < wq->spec.max_retries));

		switch (ret) {
		case WQ_QUEUE_BLOCKED: {
			/* decrement item->ran again, because this isn't an
			 * item-specific error, and fall through to
			 * WQ_RETRY_LATER
			 */
			item->ran--;
		}
		case WQ_RETRY_LATER: {
			goto stats;
		}
		case WQ_REQUEUE: {
			item->ran--;
			work_queue_item_requeue(wq, node);
			/* If a single node is being used with a meta-queue
			 * (e.g., zebra), update the next node as we don't
			 * want to exit the thread and reschedule it after
			 * every node.  By definition, WQ_REQUEUE is meant to
			 * continue the processing; the yield logic will kick
			 * in to terminate the thread once its time slot is
			 * exceeded.
			 */
			if (nnode == NULL)
				nnode = node;
			break;
		}
		case WQ_RETRY_NOW:
		/* a RETRY_NOW that gets here has exceeded max_retries, same
		 * as WQ_ERROR
		 */
		case WQ_ERROR: {
			if (wq->spec.errorfunc)
				wq->spec.errorfunc(wq, item);
		}
		/* fallthru */
		case WQ_SUCCESS:
		default: {
			work_queue_item_remove(wq, node);
			break;
		}
		}

		/* completed cycle */
		cycles++;

		/* test if we should yield */
		if (!(cycles % wq->cycles.granularity)
		    && thread_should_yield(thread)) {
			yielded = 1;
			goto stats;
		}
	}

stats:

#define WQ_HYSTERESIS_FACTOR 4

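	/* Worked example of the adjustment below (illustrative numbers, not
	 * taken from any daemon): with granularity 10 and factor 4, a run of
	 * more than 160 cycles quadruples granularity to 40; a run of 41-160
	 * cycles nudges it up to 14; and yielding after fewer than 10 cycles
	 * drops granularity back to the cycle count actually achieved.
	 */
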
	/* we yielded, check whether granularity should be reduced */
	if (yielded && (cycles < wq->cycles.granularity)) {
		wq->cycles.granularity =
			((cycles > 0) ? cycles : WORK_QUEUE_MIN_GRANULARITY);
	}
	/* otherwise, should granularity increase? */
	else if (cycles >= (wq->cycles.granularity)) {
		if (cycles > wq->cycles.best)
			wq->cycles.best = cycles;

		/* along with the yielded check, provides hysteresis for
		 * granularity
		 */
		if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
			      * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity *=
				WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
		else if (cycles
			 > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
			wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
	}
#undef WQ_HYSTERESIS_FACTOR
d62a17ae | 367 | |
368 | wq->runs++; | |
369 | wq->cycles.total += cycles; | |
370 | if (yielded) | |
371 | wq->yields++; | |
354d119a | 372 | |
373 | #if 0 | |
374 | printf ("%s: cycles %d, new: best %d, worst %d\n", | |
375 | __func__, cycles, wq->cycles.best, wq->cycles.granularity); | |
376 | #endif | |
d62a17ae | 377 | |
378 | /* Is the queue done yet? If it is, call the completion callback. */ | |
379 | if (listcount(wq->items) > 0) | |
380 | work_queue_schedule(wq, 0); | |
381 | else if (wq->spec.completion_func) | |
382 | wq->spec.completion_func(wq); | |
383 | ||
384 | return 0; | |
354d119a | 385 | } |