/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */

#include <zebra.h>
#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"

DEFINE_MTYPE(LIB, WORK_QUEUE, "Work queue")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_ITEM, "Work queue item")
DEFINE_MTYPE_STATIC(LIB, WORK_QUEUE_NAME, "Work queue name string")

/* master list of work_queues */
static struct list _work_queues;
/* pointer primarily to avoid an otherwise harmless warning on
 * ALL_LIST_ELEMENTS_RO
 */
static struct list *work_queues = &_work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1

static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}

/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
  new->spec.yield = THREAD_YIELD_TIME_SLOT;

  return new;
}

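/* A minimal usage sketch: a daemon creates a queue, fills in the spec
 * callbacks it needs, then feeds items in with work_queue_add.  The
 * example_* callback names and the value 3 for max_retries are
 * illustrative assumptions, not part of this file; the spec fields
 * themselves are the ones this file uses.
 *
 *   struct work_queue *wq = work_queue_new (master, "example queue");
 *
 *   wq->spec.workfunc        = example_process_item;  // required
 *   wq->spec.del_item_data   = example_del_item;      // optional
 *   wq->spec.errorfunc       = example_error;         // optional
 *   wq->spec.completion_func = example_done;          // optional
 *   wq->spec.max_retries     = 3;
 *
 *   work_queue_add (wq, item_data);
 */
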
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel(wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}

bool
work_queue_is_scheduled (struct work_queue *wq)
{
  return (wq->thread != NULL);
}

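/* Note, derived from the condition below: a queue run is scheduled only
 * when all three hold - the queue is unplugged, no run is already
 * pending (wq->thread tracks the pending background thread, if any),
 * and at least one item is queued.
 */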
static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      /* set thread yield time, if needed */
      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
        thread_set_yield_time (wq->thread, wq->spec.yield);
      return 1;
    }
  else
    return 0;
}

void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}

static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}

static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}

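/* Legend for the 'show work-queues' output below, derived from the
 * format strings and arguments in the function:
 *
 *   P            - 'P' if the queue is plugged (WQ_UNPLUGGED cleared)
 *   Items        - current queue length
 *   Hold (ms)    - spec.hold, delay before a queue run is scheduled
 *   Q. Runs      - total number of queue runs
 *   Yields       - total number of runs that yielded the CPU early
 *   Cycle Counts - best / granularity / total cycles, plus the average
 *                  cycles per run (total / runs)
 */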
DEFUN (show_work_queues,
       show_work_queues_cmd,
       "show work-queues",
       SHOW_STR
       "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %8s %21s%s",
           ' ', "List","(ms) ","Q. Runs","Yields","Cycle Counts ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total","Total",
           "Best","Gran.","Total","Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
    {
      vty_out (vty,"%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs, wq->yields,
               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}

/* 'plug' a queue: stop it from being scheduled,
 * i.e. prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}

/* unplug queue, schedule it again, if appropriate,
 * i.e. allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}

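/* A sketch of the intended plug/unplug pattern, assuming a caller with
 * a batch of related items (batch_count and batch_item are illustrative
 * names): plugging first lets all the items queue up so one queue run
 * can process them together, rather than scheduling a run per add.
 *
 *   work_queue_plug (wq);
 *   for (i = 0; i < batch_count; i++)
 *     work_queue_add (wq, batch_item[i]);
 *   work_queue_unplug (wq);   // schedules the queue run
 */
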
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise a subsequent work_queue_add () will schedule it again
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  assert (wq && wq->items);
  wq->thread = NULL;

  /* calculate cycle granularity:
   * list iteration == 1 run
   * listnode processing == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to end of time slot, can increase otherwise
   *              by a small factor.
   *
   * We could use just the average and save some work, however we want to be
   * able to adjust quickly to CPU pressure.  Average won't shift much if
   * daemon has been running a long time.
   */
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }

      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));

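      /* Summary of the status values handled below, taken from the case
       * arms themselves: WQ_QUEUE_BLOCKED and WQ_RETRY_LATER end this
       * run and leave the item queued; WQ_REQUEUE moves the item to the
       * back of the queue and continues; a WQ_RETRY_NOW that is still
       * failing after max_retries is treated like WQ_ERROR; WQ_SUCCESS
       * (and any other value) removes the item.
       */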
      switch (ret)
        {
          case WQ_QUEUE_BLOCKED:
            {
              /* decrement item->ran again, because this isn't an item
               * specific error, and fall through to WQ_RETRY_LATER
               */
              item->ran--;
            }
          case WQ_RETRY_LATER:
            {
              goto stats;
            }
          case WQ_REQUEUE:
            {
              item->ran--;
              work_queue_item_requeue (wq, node);
              /* If a single node is being used with a meta-queue (e.g., zebra),
               * update the next node as we don't want to exit the thread and
               * reschedule it after every node.  By definition, WQ_REQUEUE is
               * meant to continue the processing; the yield logic will kick in
               * to terminate the thread when time has exceeded.
               */
              if (nnode == NULL)
                nnode = node;
              break;
            }
          case WQ_RETRY_NOW:
            /* a RETRY_NOW that gets here has exceeded max_retries, same as ERROR */
          case WQ_ERROR:
            {
              if (wq->spec.errorfunc)
                wq->spec.errorfunc (wq, item->data);
            }
            /* fall through here is deliberate */
          case WQ_SUCCESS:
          default:
            {
              work_queue_item_remove (wq, node);
              break;
            }
        }

      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
          && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERESIS_FACTOR 4

  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  /* otherwise, should granularity increase? */
  else if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                    * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity *= WQ_HYSTERESIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERESIS_FACTOR;
    }
#undef WQ_HYSTERESIS_FACTOR
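
  /* Worked example of the adjustment above, using the code's own
   * constants (a granularity of 10 is an assumed starting value):
   *   - yielded after 6 cycles               -> granularity drops to 6
   *   - finished 50 cycles  (> 10 * 4)       -> granularity 10 + 4 = 14
   *   - finished 200 cycles (> 10 * 4 * 4)   -> granularity 10 * 4 = 40
   * So granularity falls quickly under CPU pressure, and climbs slowly
   * (or ramps by a factor of 4 when runs are far longer than expected).
   */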

  wq->runs++;
  wq->cycles.total += cycles;
  if (yielded)
    wq->yields++;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif

  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}