]> git.proxmox.com Git - mirror_frr.git/blame - lib/thread.c
lib: use traditional yacc empty statement
[mirror_frr.git] / lib / thread.c
CommitLineData
718e3744 1/* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 */
21
22/* #define DEBUG */
23
24#include <zebra.h>
308d14ae 25#include <sys/resource.h>
718e3744 26
27#include "thread.h"
28#include "memory.h"
29#include "log.h"
e04ab74d 30#include "hash.h"
4becea72 31#include "pqueue.h"
e04ab74d 32#include "command.h"
05c447dd 33#include "sigevent.h"
d6be5fb9 34
4a1ab8e4
DL
35DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
36DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
37DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
38
3b96b781
HT
39#if defined(__APPLE__)
40#include <mach/mach.h>
41#include <mach/mach_time.h>
42#endif
43
db9c0df9 44/* Recent absolute time of day */
8b70d0b0 45struct timeval recent_time;
db9c0df9
PJ
46/* Relative time, since startup */
47static struct timeval relative_time;
6b0655a2 48
e04ab74d 49static struct hash *cpu_record = NULL;
6b0655a2 50
8b70d0b0 51/* Adjust so that tv_usec is in the range [0,TIMER_SECOND_MICRO).
52 And change negative values to 0. */
a48b4e6d 53static struct timeval
718e3744 54timeval_adjust (struct timeval a)
55{
56 while (a.tv_usec >= TIMER_SECOND_MICRO)
57 {
58 a.tv_usec -= TIMER_SECOND_MICRO;
59 a.tv_sec++;
60 }
61
62 while (a.tv_usec < 0)
63 {
64 a.tv_usec += TIMER_SECOND_MICRO;
65 a.tv_sec--;
66 }
67
68 if (a.tv_sec < 0)
8b70d0b0 69 /* Change negative timeouts to 0. */
70 a.tv_sec = a.tv_usec = 0;
718e3744 71
72 return a;
73}
74
75static struct timeval
76timeval_subtract (struct timeval a, struct timeval b)
77{
78 struct timeval ret;
79
80 ret.tv_usec = a.tv_usec - b.tv_usec;
81 ret.tv_sec = a.tv_sec - b.tv_sec;
82
83 return timeval_adjust (ret);
84}
85
/* Three-way comparison of two timevals: negative when a < b, zero when
   equal, positive when a > b. */
static long
timeval_cmp (struct timeval a, struct timeval b)
{
  if (a.tv_sec != b.tv_sec)
    return a.tv_sec - b.tv_sec;
  return a.tv_usec - b.tv_usec;
}
92
cf744958 93unsigned long
718e3744 94timeval_elapsed (struct timeval a, struct timeval b)
95{
96 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
97 + (a.tv_usec - b.tv_usec));
98}
6b0655a2 99
db9c0df9
PJ
100/* gettimeofday wrapper, to keep recent_time updated */
101static int
102quagga_gettimeofday (struct timeval *tv)
103{
104 int ret;
105
106 assert (tv);
107
108 if (!(ret = gettimeofday (&recent_time, NULL)))
109 {
db9c0df9
PJ
110 /* avoid copy if user passed recent_time pointer.. */
111 if (tv != &recent_time)
112 *tv = recent_time;
113 return 0;
114 }
115 return ret;
116}
117
118static int
119quagga_get_relative (struct timeval *tv)
120{
121 int ret;
122
123#ifdef HAVE_CLOCK_MONOTONIC
124 {
125 struct timespec tp;
126 if (!(ret = clock_gettime (CLOCK_MONOTONIC, &tp)))
127 {
128 relative_time.tv_sec = tp.tv_sec;
129 relative_time.tv_usec = tp.tv_nsec / 1000;
130 }
131 }
3b96b781
HT
132#elif defined(__APPLE__)
133 {
134 uint64_t ticks;
135 uint64_t useconds;
136 static mach_timebase_info_data_t timebase_info;
137
138 ticks = mach_absolute_time();
139 if (timebase_info.denom == 0)
140 mach_timebase_info(&timebase_info);
141
142 useconds = ticks * timebase_info.numer / timebase_info.denom / 1000;
143 relative_time.tv_sec = useconds / 1000000;
144 relative_time.tv_usec = useconds % 1000000;
145
146 return 0;
147 }
148#else /* !HAVE_CLOCK_MONOTONIC && !__APPLE__ */
16f5949d 149#error no monotonic clock on this system
db9c0df9
PJ
150#endif /* HAVE_CLOCK_MONOTONIC */
151
152 if (tv)
153 *tv = relative_time;
154
155 return ret;
156}
157
db9c0df9
PJ
158/* Exported Quagga timestamp function.
159 * Modelled on POSIX clock_gettime.
160 */
161int
162quagga_gettime (enum quagga_clkid clkid, struct timeval *tv)
163{
164 switch (clkid)
165 {
db9c0df9
PJ
166 case QUAGGA_CLK_MONOTONIC:
167 return quagga_get_relative (tv);
db9c0df9
PJ
168 default:
169 errno = EINVAL;
170 return -1;
171 }
172}
173
a05d8b7a
DL
/* Monotonic time in whole seconds since startup. */
time_t
quagga_monotime (void)
{
  struct timeval now;

  quagga_get_relative (&now);
  return now.tv_sec;
}
181
db9c0df9
PJ
182/* Public export of recent_relative_time by value */
183struct timeval
184recent_relative_time (void)
185{
186 return relative_time;
187}
6b0655a2 188
a48b4e6d 189static unsigned int
e04ab74d 190cpu_record_hash_key (struct cpu_thread_history *a)
191{
8cc4198f 192 return (uintptr_t) a->func;
e04ab74d 193}
194
195static int
ffe11cfb
SH
196cpu_record_hash_cmp (const struct cpu_thread_history *a,
197 const struct cpu_thread_history *b)
e04ab74d 198{
199 return a->func == b->func;
200}
201
8cc4198f 202static void *
e04ab74d 203cpu_record_hash_alloc (struct cpu_thread_history *a)
204{
205 struct cpu_thread_history *new;
039b9577 206 new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
e04ab74d 207 new->func = a->func;
9c7753e4 208 new->funcname = a->funcname;
e04ab74d 209 return new;
210}
211
228da428
CC
212static void
213cpu_record_hash_free (void *a)
214{
215 struct cpu_thread_history *hist = a;
216
228da428
CC
217 XFREE (MTYPE_THREAD_STATS, hist);
218}
219
f63f06da 220static void
e04ab74d 221vty_out_cpu_thread_history(struct vty* vty,
222 struct cpu_thread_history *a)
223{
0cf74a83 224 vty_out(vty, "%10ld.%03ld %9d %8ld %9ld %8ld %9ld",
8b70d0b0 225 a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
226 a->cpu.total/a->total_calls, a->cpu.max,
227 a->real.total/a->total_calls, a->real.max);
8b70d0b0 228 vty_out(vty, " %c%c%c%c%c%c %s%s",
e04ab74d 229 a->types & (1 << THREAD_READ) ? 'R':' ',
230 a->types & (1 << THREAD_WRITE) ? 'W':' ',
231 a->types & (1 << THREAD_TIMER) ? 'T':' ',
232 a->types & (1 << THREAD_EVENT) ? 'E':' ',
233 a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
a48b4e6d 234 a->types & (1 << THREAD_BACKGROUND) ? 'B' : ' ',
e04ab74d 235 a->funcname, VTY_NEWLINE);
236}
237
238static void
239cpu_record_hash_print(struct hash_backet *bucket,
240 void *args[])
241{
242 struct cpu_thread_history *totals = args[0];
243 struct vty *vty = args[1];
41b2373c 244 thread_type *filter = args[2];
e04ab74d 245 struct cpu_thread_history *a = bucket->data;
f48f65d2 246
e04ab74d 247 if ( !(a->types & *filter) )
248 return;
249 vty_out_cpu_thread_history(vty,a);
e04ab74d 250 totals->total_calls += a->total_calls;
8b70d0b0 251 totals->real.total += a->real.total;
252 if (totals->real.max < a->real.max)
253 totals->real.max = a->real.max;
8b70d0b0 254 totals->cpu.total += a->cpu.total;
255 if (totals->cpu.max < a->cpu.max)
256 totals->cpu.max = a->cpu.max;
e04ab74d 257}
258
259static void
41b2373c 260cpu_record_print(struct vty *vty, thread_type filter)
e04ab74d 261{
262 struct cpu_thread_history tmp;
263 void *args[3] = {&tmp, vty, &filter};
264
265 memset(&tmp, 0, sizeof tmp);
9c7753e4 266 tmp.funcname = "TOTAL";
e04ab74d 267 tmp.types = filter;
268
8b70d0b0 269 vty_out(vty, "%21s %18s %18s%s",
24f5e2fc 270 "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
0cf74a83 271 vty_out(vty, " Runtime(ms) Invoked Avg uSec Max uSecs");
8b70d0b0 272 vty_out(vty, " Avg uSec Max uSecs");
8b70d0b0 273 vty_out(vty, " Type Thread%s", VTY_NEWLINE);
e04ab74d 274 hash_iterate(cpu_record,
275 (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
276 args);
277
278 if (tmp.total_calls > 0)
279 vty_out_cpu_thread_history(vty, &tmp);
280}
281
49d41a26
DS
/* CLI handler: display the per-function thread CPU statistics table,
 * optionally restricted to the thread types named by the FILTER
 * letters (any of r/w/t/e/x/b, case-insensitive). */
DEFUN (show_thread_cpu,
       show_thread_cpu_cmd,
       "show thread cpu [FILTER]",
       SHOW_STR
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
  int idx_filter = 3;
  int i = 0;
  /* No FILTER argument: match every thread type. */
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      /* Translate each filter letter into its thread-type bit. */
      while (argv[idx_filter]->arg[i] != '\0')
	{
	  switch ( argv[idx_filter]->arg[i] )
	    {
	    case 'r':
	    case 'R':
	      filter |= (1 << THREAD_READ);
	      break;
	    case 'w':
	    case 'W':
	      filter |= (1 << THREAD_WRITE);
	      break;
	    case 't':
	    case 'T':
	      filter |= (1 << THREAD_TIMER);
	      break;
	    case 'e':
	    case 'E':
	      filter |= (1 << THREAD_EVENT);
	      break;
	    case 'x':
	    case 'X':
	      filter |= (1 << THREAD_EXECUTE);
	      break;
	    case 'b':
	    case 'B':
	      filter |= (1 << THREAD_BACKGROUND);
	      break;
	    default:
	      /* Unknown letters are silently ignored. */
	      break;
	    }
	  ++i;
	}
      if (filter == 0)
	{
	  vty_out(vty, "Invalid filter \"%s\" specified,"
		  " must contain at least one of 'RWTEXB'%s",
		  argv[idx_filter]->arg, VTY_NEWLINE);
	  return CMD_WARNING;
	}
    }

  cpu_record_print(vty, filter);
  return CMD_SUCCESS;
}
e276eb82
PJ
342
343static void
344cpu_record_hash_clear (struct hash_backet *bucket,
345 void *args)
346{
347 thread_type *filter = args;
348 struct cpu_thread_history *a = bucket->data;
f48f65d2 349
e276eb82
PJ
350 if ( !(a->types & *filter) )
351 return;
352
353 hash_release (cpu_record, bucket->data);
354}
355
356static void
357cpu_record_clear (thread_type filter)
358{
359 thread_type *tmp = &filter;
360 hash_iterate (cpu_record,
361 (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
362 tmp);
363}
364
49d41a26
DS
/* CLI handler: discard stored thread CPU statistics, optionally
 * restricted to the thread types named by the FILTER letters
 * (any of r/w/t/e/x/b, case-insensitive). */
DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
  int idx_filter = 3;
  int i = 0;
  /* No FILTER argument: clear every thread type. */
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      /* Translate each filter letter into its thread-type bit. */
      while (argv[idx_filter]->arg[i] != '\0')
	{
	  switch ( argv[idx_filter]->arg[i] )
	    {
	    case 'r':
	    case 'R':
	      filter |= (1 << THREAD_READ);
	      break;
	    case 'w':
	    case 'W':
	      filter |= (1 << THREAD_WRITE);
	      break;
	    case 't':
	    case 'T':
	      filter |= (1 << THREAD_TIMER);
	      break;
	    case 'e':
	    case 'E':
	      filter |= (1 << THREAD_EVENT);
	      break;
	    case 'x':
	    case 'X':
	      filter |= (1 << THREAD_EXECUTE);
	      break;
	    case 'b':
	    case 'B':
	      filter |= (1 << THREAD_BACKGROUND);
	      break;
	    default:
	      /* Unknown letters are silently ignored. */
	      break;
	    }
	  ++i;
	}
      if (filter == 0)
	{
	  vty_out(vty, "Invalid filter \"%s\" specified,"
		  " must contain at least one of 'RWTEXB'%s",
		  argv[idx_filter]->arg, VTY_NEWLINE);
	  return CMD_WARNING;
	}
    }

  cpu_record_clear (filter);
  return CMD_SUCCESS;
}
6b0655a2 425
4becea72
CF
426static int
427thread_timer_cmp(void *a, void *b)
428{
429 struct thread *thread_a = a;
430 struct thread *thread_b = b;
431
432 long cmp = timeval_cmp(thread_a->u.sands, thread_b->u.sands);
433
434 if (cmp < 0)
435 return -1;
436 if (cmp > 0)
437 return 1;
438 return 0;
439}
440
441static void
442thread_timer_update(void *node, int actual_position)
443{
444 struct thread *thread = node;
445
446 thread->index = actual_position;
447}
448
718e3744 449/* Allocate new thread master. */
450struct thread_master *
0a95a0d0 451thread_master_create (void)
718e3744 452{
4becea72 453 struct thread_master *rv;
308d14ae
DV
454 struct rlimit limit;
455
456 getrlimit(RLIMIT_NOFILE, &limit);
4becea72 457
e04ab74d 458 if (cpu_record == NULL)
8cc4198f 459 cpu_record
90645f55
SH
460 = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
461 (int (*) (const void *, const void *))cpu_record_hash_cmp);
4becea72
CF
462
463 rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
308d14ae
DV
464 if (rv == NULL)
465 {
466 return NULL;
467 }
468
469 rv->fd_limit = (int)limit.rlim_cur;
470 rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
471 if (rv->read == NULL)
472 {
473 XFREE (MTYPE_THREAD_MASTER, rv);
474 return NULL;
475 }
476
477 rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
478 if (rv->write == NULL)
479 {
480 XFREE (MTYPE_THREAD, rv->read);
481 XFREE (MTYPE_THREAD_MASTER, rv);
482 return NULL;
483 }
4becea72
CF
484
485 /* Initialize the timer queues */
486 rv->timer = pqueue_create();
487 rv->background = pqueue_create();
488 rv->timer->cmp = rv->background->cmp = thread_timer_cmp;
489 rv->timer->update = rv->background->update = thread_timer_update;
490
0a95a0d0 491#if defined(HAVE_POLL)
b53e10a1 492 rv->handler.pfdsize = rv->fd_limit;
0a95a0d0
DS
493 rv->handler.pfdcount = 0;
494 rv->handler.pfds = (struct pollfd *) malloc (sizeof (struct pollfd) * rv->handler.pfdsize);
495 memset (rv->handler.pfds, 0, sizeof (struct pollfd) * rv->handler.pfdsize);
496#endif
4becea72 497 return rv;
718e3744 498}
499
500/* Add a new thread to the list. */
501static void
502thread_list_add (struct thread_list *list, struct thread *thread)
503{
504 thread->next = NULL;
505 thread->prev = list->tail;
506 if (list->tail)
507 list->tail->next = thread;
508 else
509 list->head = thread;
510 list->tail = thread;
511 list->count++;
512}
513
718e3744 514/* Delete a thread from the list. */
515static struct thread *
516thread_list_delete (struct thread_list *list, struct thread *thread)
517{
518 if (thread->next)
519 thread->next->prev = thread->prev;
520 else
521 list->tail = thread->prev;
522 if (thread->prev)
523 thread->prev->next = thread->next;
524 else
525 list->head = thread->next;
526 thread->next = thread->prev = NULL;
527 list->count--;
528 return thread;
529}
530
308d14ae
DV
531static void
532thread_delete_fd (struct thread **thread_array, struct thread *thread)
533{
534 thread_array[thread->u.fd] = NULL;
535}
536
537static void
538thread_add_fd (struct thread **thread_array, struct thread *thread)
539{
540 thread_array[thread->u.fd] = thread;
541}
542
495f0b13
DS
543/* Thread list is empty or not. */
544static int
545thread_empty (struct thread_list *list)
546{
547 return list->head ? 0 : 1;
548}
549
550/* Delete top of the list and return it. */
551static struct thread *
552thread_trim_head (struct thread_list *list)
553{
554 if (!thread_empty (list))
555 return thread_list_delete (list, list->head);
556 return NULL;
557}
558
718e3744 559/* Move thread to unuse list. */
560static void
561thread_add_unuse (struct thread_master *m, struct thread *thread)
562{
a48b4e6d 563 assert (m != NULL && thread != NULL);
718e3744 564 assert (thread->next == NULL);
565 assert (thread->prev == NULL);
566 assert (thread->type == THREAD_UNUSED);
567 thread_list_add (&m->unuse, thread);
568}
569
570/* Free all unused thread. */
571static void
572thread_list_free (struct thread_master *m, struct thread_list *list)
573{
574 struct thread *t;
575 struct thread *next;
576
577 for (t = list->head; t; t = next)
578 {
579 next = t->next;
580 XFREE (MTYPE_THREAD, t);
581 list->count--;
582 m->alloc--;
583 }
584}
585
308d14ae
DV
586static void
587thread_array_free (struct thread_master *m, struct thread **thread_array)
588{
589 struct thread *t;
590 int index;
591
592 for (index = 0; index < m->fd_limit; ++index)
593 {
594 t = thread_array[index];
595 if (t)
596 {
597 thread_array[index] = NULL;
598 XFREE (MTYPE_THREAD, t);
599 m->alloc--;
600 }
601 }
602 XFREE (MTYPE_THREAD, thread_array);
603}
604
4becea72
CF
605static void
606thread_queue_free (struct thread_master *m, struct pqueue *queue)
607{
608 int i;
609
610 for (i = 0; i < queue->size; i++)
611 XFREE(MTYPE_THREAD, queue->array[i]);
612
613 m->alloc -= queue->size;
614 pqueue_delete(queue);
615}
616
495f0b13
DS
617/*
618 * thread_master_free_unused
619 *
620 * As threads are finished with they are put on the
621 * unuse list for later reuse.
622 * If we are shutting down, Free up unused threads
623 * So we can see if we forget to shut anything off
624 */
625void
626thread_master_free_unused (struct thread_master *m)
627{
628 struct thread *t;
629 while ((t = thread_trim_head(&m->unuse)) != NULL)
630 {
631 XFREE(MTYPE_THREAD, t);
632 }
633}
634
718e3744 635/* Stop thread scheduler. */
636void
637thread_master_free (struct thread_master *m)
638{
308d14ae
DV
639 thread_array_free (m, m->read);
640 thread_array_free (m, m->write);
4becea72 641 thread_queue_free (m, m->timer);
718e3744 642 thread_list_free (m, &m->event);
643 thread_list_free (m, &m->ready);
644 thread_list_free (m, &m->unuse);
4becea72 645 thread_queue_free (m, m->background);
0a95a0d0
DS
646
647#if defined(HAVE_POLL)
648 XFREE (MTYPE_THREAD_MASTER, m->handler.pfds);
649#endif
718e3744 650 XFREE (MTYPE_THREAD_MASTER, m);
228da428
CC
651
652 if (cpu_record)
653 {
654 hash_clean (cpu_record, cpu_record_hash_free);
655 hash_free (cpu_record);
656 cpu_record = NULL;
657 }
718e3744 658}
659
718e3744 660/* Return remain time in second. */
661unsigned long
662thread_timer_remain_second (struct thread *thread)
663{
db9c0df9
PJ
664 quagga_get_relative (NULL);
665
666 if (thread->u.sands.tv_sec - relative_time.tv_sec > 0)
667 return thread->u.sands.tv_sec - relative_time.tv_sec;
718e3744 668 else
669 return 0;
670}
671
9c7753e4
DL
672#define debugargdef const char *funcname, const char *schedfrom, int fromln
673#define debugargpass funcname, schedfrom, fromln
e04ab74d 674
6ac44687
CF
675struct timeval
676thread_timer_remain(struct thread *thread)
677{
678 quagga_get_relative(NULL);
679
680 return timeval_subtract(thread->u.sands, relative_time);
681}
682
718e3744 683/* Get new thread. */
684static struct thread *
685thread_get (struct thread_master *m, u_char type,
9c7753e4 686 int (*func) (struct thread *), void *arg, debugargdef)
718e3744 687{
64018324 688 struct thread *thread = thread_trim_head (&m->unuse);
718e3744 689
22714f99 690 if (! thread)
718e3744 691 {
692 thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
693 m->alloc++;
694 }
695 thread->type = type;
e04ab74d 696 thread->add_type = type;
718e3744 697 thread->master = m;
698 thread->func = func;
699 thread->arg = arg;
4becea72 700 thread->index = -1;
50596be0 701 thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
4becea72 702
9c7753e4
DL
703 thread->funcname = funcname;
704 thread->schedfrom = schedfrom;
705 thread->schedfrom_line = fromln;
e04ab74d 706
718e3744 707 return thread;
708}
709
aa037235
DS
#if defined (HAVE_POLL)

#define fd_copy_fd_set(X) (X)

/* generic add thread function: register FUNC on FD for DIR
 * (THREAD_READ or THREAD_WRITE) in the poll fd array.  Reuses an
 * existing pollfd entry for FD if one is present, otherwise appends
 * a new one. */
static struct thread *
generic_thread_add(struct thread_master *m, int (*func) (struct thread *),
		   void *arg, int fd, int dir, debugargdef)
{
  struct thread *thread;

  u_char type;
  short int event;

  if (dir == THREAD_READ)
    {
      event = (POLLIN | POLLHUP);
      type = THREAD_READ;
    }
  else
    {
      event = (POLLOUT | POLLHUP);
      type = THREAD_WRITE;
    }

  /* Look for an existing pollfd entry for this fd; default to
   * appending at the end of the array. */
  nfds_t queuepos = m->handler.pfdcount;
  nfds_t i=0;
  for (i=0; i<m->handler.pfdcount; i++)
    if (m->handler.pfds[i].fd == fd)
      {
        queuepos = i;
        break;
      }

  /* is there enough space for a new fd? */
  assert (queuepos < m->handler.pfdsize);

  thread = thread_get (m, type, func, arg, debugargpass);
  m->handler.pfds[queuepos].fd = fd;
  /* OR in the event bits: the same fd may be watched for both
   * directions through a single pollfd entry. */
  m->handler.pfds[queuepos].events |= event;
  if (queuepos == m->handler.pfdcount)
    m->handler.pfdcount++;

  return thread;
}
#else

#define fd_copy_fd_set(X) (X)
#endif
cc7165b6 759
209a72a6 760static int
0a95a0d0 761fd_select (struct thread_master *m, int size, thread_fd_set *read, thread_fd_set *write, thread_fd_set *except, struct timeval *timer_wait)
209a72a6 762{
0a95a0d0
DS
763 int num;
764#if defined(HAVE_POLL)
765 /* recalc timeout for poll. Attention NULL pointer is no timeout with
766 select, where with poll no timeount is -1 */
767 int timeout = -1;
768 if (timer_wait != NULL)
769 timeout = (timer_wait->tv_sec*1000) + (timer_wait->tv_usec/1000);
770
771 num = poll (m->handler.pfds, m->handler.pfdcount + m->handler.pfdcountsnmp, timeout);
772#else
773 num = select (size, read, write, except, timer_wait);
774#endif
775
776 return num;
209a72a6
DS
777}
778
779static int
0a95a0d0 780fd_is_set (struct thread *thread, thread_fd_set *fdset, int pos)
209a72a6 781{
0a95a0d0
DS
782#if defined(HAVE_POLL)
783 return 1;
784#else
785 return FD_ISSET (THREAD_FD (thread), fdset);
786#endif
209a72a6
DS
787}
788
209a72a6 789static int
0a95a0d0 790fd_clear_read_write (struct thread *thread)
209a72a6 791{
0a95a0d0
DS
792#if !defined(HAVE_POLL)
793 thread_fd_set *fdset = NULL;
794 int fd = THREAD_FD (thread);
795
796 if (thread->type == THREAD_READ)
797 fdset = &thread->master->handler.readfd;
798 else
799 fdset = &thread->master->handler.writefd;
800
209a72a6
DS
801 if (!FD_ISSET (fd, fdset))
802 return 0;
803
804 FD_CLR (fd, fdset);
0a95a0d0 805#endif
209a72a6
DS
806 return 1;
807}
808
718e3744 809/* Add new read thread. */
810struct thread *
8dadcae7 811funcname_thread_add_read_write (int dir, struct thread_master *m,
9c7753e4
DL
812 int (*func) (struct thread *), void *arg, int fd,
813 debugargdef)
718e3744 814{
8dadcae7 815 struct thread *thread = NULL;
718e3744 816
aa037235
DS
817#if !defined(HAVE_POLL)
818 thread_fd_set *fdset = NULL;
819 if (dir == THREAD_READ)
820 fdset = &m->handler.readfd;
821 else
822 fdset = &m->handler.writefd;
823#endif
824
825#if defined (HAVE_POLL)
9c7753e4 826 thread = generic_thread_add(m, func, arg, fd, dir, debugargpass);
aa037235
DS
827
828 if (thread == NULL)
829 return NULL;
830#else
831 if (FD_ISSET (fd, fdset))
832 {
833 zlog (NULL, LOG_WARNING, "There is already %s fd [%d]", (dir = THREAD_READ) ? "read" : "write", fd);
834 return NULL;
835 }
836
837 FD_SET (fd, fdset);
9c7753e4 838 thread = thread_get (m, dir, func, arg, debugargpass);
aa037235 839#endif
0a95a0d0 840
718e3744 841 thread->u.fd = fd;
8dadcae7
DS
842 if (dir == THREAD_READ)
843 thread_add_fd (m->read, thread);
844 else
845 thread_add_fd (m->write, thread);
718e3744 846
847 return thread;
848}
849
98c91ac6 850static struct thread *
851funcname_thread_add_timer_timeval (struct thread_master *m,
852 int (*func) (struct thread *),
a48b4e6d 853 int type,
98c91ac6 854 void *arg,
9c7753e4
DL
855 struct timeval *time_relative,
856 debugargdef)
718e3744 857{
718e3744 858 struct thread *thread;
4becea72 859 struct pqueue *queue;
db9c0df9 860 struct timeval alarm_time;
718e3744 861
862 assert (m != NULL);
863
8b70d0b0 864 assert (type == THREAD_TIMER || type == THREAD_BACKGROUND);
a48b4e6d 865 assert (time_relative);
866
4becea72 867 queue = ((type == THREAD_TIMER) ? m->timer : m->background);
9c7753e4 868 thread = thread_get (m, type, func, arg, debugargpass);
718e3744 869
870 /* Do we need jitter here? */
b8192765 871 quagga_get_relative (NULL);
db9c0df9
PJ
872 alarm_time.tv_sec = relative_time.tv_sec + time_relative->tv_sec;
873 alarm_time.tv_usec = relative_time.tv_usec + time_relative->tv_usec;
8b70d0b0 874 thread->u.sands = timeval_adjust(alarm_time);
718e3744 875
4becea72 876 pqueue_enqueue(thread, queue);
9e867fe6 877 return thread;
878}
879
98c91ac6 880
881/* Add timer event thread. */
9e867fe6 882struct thread *
98c91ac6 883funcname_thread_add_timer (struct thread_master *m,
884 int (*func) (struct thread *),
9c7753e4
DL
885 void *arg, long timer,
886 debugargdef)
9e867fe6 887{
98c91ac6 888 struct timeval trel;
9e867fe6 889
890 assert (m != NULL);
891
9076fbd3 892 trel.tv_sec = timer;
98c91ac6 893 trel.tv_usec = 0;
9e867fe6 894
a48b4e6d 895 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg,
9c7753e4 896 &trel, debugargpass);
98c91ac6 897}
9e867fe6 898
98c91ac6 899/* Add timer event thread with "millisecond" resolution */
900struct thread *
901funcname_thread_add_timer_msec (struct thread_master *m,
902 int (*func) (struct thread *),
9c7753e4
DL
903 void *arg, long timer,
904 debugargdef)
98c91ac6 905{
906 struct timeval trel;
9e867fe6 907
98c91ac6 908 assert (m != NULL);
718e3744 909
af04bd7c 910 trel.tv_sec = timer / 1000;
911 trel.tv_usec = 1000*(timer % 1000);
98c91ac6 912
a48b4e6d 913 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
9c7753e4 914 arg, &trel, debugargpass);
a48b4e6d 915}
916
d03c4cbd
DL
917/* Add timer event thread with "millisecond" resolution */
918struct thread *
919funcname_thread_add_timer_tv (struct thread_master *m,
920 int (*func) (struct thread *),
921 void *arg, struct timeval *tv,
922 debugargdef)
923{
924 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
925 arg, tv, debugargpass);
926}
927
a48b4e6d 928/* Add a background thread, with an optional millisec delay */
929struct thread *
930funcname_thread_add_background (struct thread_master *m,
931 int (*func) (struct thread *),
9c7753e4
DL
932 void *arg, long delay,
933 debugargdef)
a48b4e6d 934{
935 struct timeval trel;
936
937 assert (m != NULL);
938
939 if (delay)
940 {
941 trel.tv_sec = delay / 1000;
942 trel.tv_usec = 1000*(delay % 1000);
943 }
944 else
945 {
946 trel.tv_sec = 0;
947 trel.tv_usec = 0;
948 }
949
950 return funcname_thread_add_timer_timeval (m, func, THREAD_BACKGROUND,
9c7753e4 951 arg, &trel, debugargpass);
718e3744 952}
953
954/* Add simple event thread. */
955struct thread *
e04ab74d 956funcname_thread_add_event (struct thread_master *m,
9c7753e4
DL
957 int (*func) (struct thread *), void *arg, int val,
958 debugargdef)
718e3744 959{
960 struct thread *thread;
961
962 assert (m != NULL);
963
9c7753e4 964 thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
718e3744 965 thread->u.val = val;
966 thread_list_add (&m->event, thread);
967
968 return thread;
969}
970
/* Drop interest in STATE (poll event bits) for THREAD's descriptor.
 * Under poll the matching pfds[] entry loses the STATE bits and, once
 * no events remain, is removed by sliding the rest of the array down.
 * Under select this falls through to clearing the fd from the fd_set. */
static void
thread_cancel_read_or_write (struct thread *thread, short int state)
{
#if defined(HAVE_POLL)
  nfds_t i;

  for (i=0;i<thread->master->handler.pfdcount;++i)
    if (thread->master->handler.pfds[i].fd == thread->u.fd)
      {
	thread->master->handler.pfds[i].events &= ~(state);

	/* remove thread fds from pfd list */
	if (thread->master->handler.pfds[i].events == 0)
	  {
	    memmove(thread->master->handler.pfds+i,
		    thread->master->handler.pfds+i+1,
		    (thread->master->handler.pfdsize-i-1) * sizeof(struct pollfd));
	    thread->master->handler.pfdcount--;
	    return;
	  }
      }
#endif

  fd_clear_read_write (thread);
}
996
/* Cancel thread from scheduler: remove THREAD from whichever container
 * currently holds it (fd array, timer/background pqueue, or the
 * event/ready list) and recycle it onto the unuse list. */
void
thread_cancel (struct thread *thread)
{
  struct thread_list *list = NULL;
  struct pqueue *queue = NULL;
  struct thread **thread_array = NULL;

  /* Select the container based on the thread's current type; exactly
   * one of queue/list/thread_array is set below. */
  switch (thread->type)
    {
    case THREAD_READ:
#if defined (HAVE_POLL)
      thread_cancel_read_or_write (thread, POLLIN | POLLHUP);
#else
      thread_cancel_read_or_write (thread, 0);
#endif
      thread_array = thread->master->read;
      break;
    case THREAD_WRITE:
#if defined (HAVE_POLL)
      thread_cancel_read_or_write (thread, POLLOUT | POLLHUP);
#else
      thread_cancel_read_or_write (thread, 0);
#endif
      thread_array = thread->master->write;
      break;
    case THREAD_TIMER:
      queue = thread->master->timer;
      break;
    case THREAD_EVENT:
      list = &thread->master->event;
      break;
    case THREAD_READY:
      list = &thread->master->ready;
      break;
    case THREAD_BACKGROUND:
      queue = thread->master->background;
      break;
    default:
      /* Unknown or already-unused type: nothing to remove. */
      return;
      break;
    }

  if (queue)
    {
      /* Timer threads record their heap slot in thread->index
       * (maintained by thread_timer_update). */
      assert(thread->index >= 0);
      assert(thread == queue->array[thread->index]);
      pqueue_remove_at(thread->index, queue);
    }
  else if (list)
    {
      thread_list_delete (list, thread);
    }
  else if (thread_array)
    {
      thread_delete_fd (thread_array, thread);
    }
  else
    {
      assert(!"Thread should be either in queue or list or array!");
    }

  thread->type = THREAD_UNUSED;
  thread_add_unuse (thread->master, thread);
}
1062
1063/* Delete all events which has argument value arg. */
dc81807a 1064unsigned int
718e3744 1065thread_cancel_event (struct thread_master *m, void *arg)
1066{
dc81807a 1067 unsigned int ret = 0;
718e3744 1068 struct thread *thread;
1069
1070 thread = m->event.head;
1071 while (thread)
1072 {
1073 struct thread *t;
1074
1075 t = thread;
1076 thread = t->next;
1077
1078 if (t->arg == arg)
a48b4e6d 1079 {
dc81807a 1080 ret++;
a48b4e6d 1081 thread_list_delete (&m->event, t);
1082 t->type = THREAD_UNUSED;
1083 thread_add_unuse (m, t);
1084 }
718e3744 1085 }
1b79fcb6
JBD
1086
1087 /* thread can be on the ready list too */
1088 thread = m->ready.head;
1089 while (thread)
1090 {
1091 struct thread *t;
1092
1093 t = thread;
1094 thread = t->next;
1095
1096 if (t->arg == arg)
1097 {
1098 ret++;
1099 thread_list_delete (&m->ready, t);
1100 t->type = THREAD_UNUSED;
1101 thread_add_unuse (m, t);
1102 }
1103 }
dc81807a 1104 return ret;
718e3744 1105}
1106
a48b4e6d 1107static struct timeval *
4becea72 1108thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
718e3744 1109{
4becea72 1110 if (queue->size)
718e3744 1111 {
4becea72
CF
1112 struct thread *next_timer = queue->array[0];
1113 *timer_val = timeval_subtract (next_timer->u.sands, relative_time);
718e3744 1114 return timer_val;
1115 }
1116 return NULL;
1117}
718e3744 1118
8cc4198f 1119static struct thread *
718e3744 1120thread_run (struct thread_master *m, struct thread *thread,
1121 struct thread *fetch)
1122{
1123 *fetch = *thread;
1124 thread->type = THREAD_UNUSED;
1125 thread_add_unuse (m, thread);
1126 return fetch;
1127}
1128
/* If THREAD's descriptor fired (per FDSET for select, or position POS
 * for poll), move it from its fd array onto the ready list and mark it
 * THREAD_READY.  Returns 1 when the thread was made ready, else 0. */
static int
thread_process_fds_helper (struct thread_master *m, struct thread *thread, thread_fd_set *fdset, short int state, int pos)
{
  struct thread **thread_array;

  if (!thread)
    return 0;

  if (thread->type == THREAD_READ)
    thread_array = m->read;
  else
    thread_array = m->write;

  if (fd_is_set (thread, fdset, pos))
    {
      fd_clear_read_write (thread);
      thread_delete_fd (thread_array, thread);
      thread_list_add (&m->ready, thread);
      thread->type = THREAD_READY;
#if defined(HAVE_POLL)
      /* One-shot semantics: stop polling this event until the thread
       * is re-added. */
      thread->master->handler.pfds[pos].events &= ~(state);
#endif
      return 1;
    }
  return 0;
}
1155
0a95a0d0
DS
1156#if defined(HAVE_POLL)
1157
#if defined(HAVE_SNMP)
/* Append the descriptors SNMP/AgentX wants watched (supplied as a
 * classic fd_set) to the poll array, after the daemon's own entries.
 *
 * The SNMP tail is rebuilt on every pass: it starts at the current
 * pfdcount, and pfdcountsnmp marks the end of the combined set. */
static void
add_snmp_pollfds(struct thread_master *m, fd_set *snmpfds, int fdsetsize)
{
  int i;
  m->handler.pfdcountsnmp = m->handler.pfdcount;
  /* cycle through fds and add necessary fds to poll set */
  for (i = 0; i < fdsetsize; ++i)
    {
      if (FD_ISSET(i, snmpfds))
        {
          /* We are about to write pfds[pfdcountsnmp]; that index must
           * be strictly below the allocated capacity.  The previous
           * '<=' check allowed a write one past the end of the array
           * when pfdcountsnmp == pfdsize. */
          assert (m->handler.pfdcountsnmp < m->handler.pfdsize);

          m->handler.pfds[m->handler.pfdcountsnmp].fd = i;
          m->handler.pfds[m->handler.pfdcountsnmp].events = POLLIN;
          m->handler.pfdcountsnmp++;
        }
    }
}
#endif
1179
/* Walk the pollfd array after poll() returned and dispatch pending
 * events.  'num' is poll()'s return value: iteration stops once that
 * many descriptors with events have been handled.  'readfd' is unused
 * in the poll backend (kept for signature parity with the select path
 * via thread_process_fds()). */
static void
check_pollfds(struct thread_master *m, fd_set *readfd, int num)
{
  nfds_t i = 0;
  int ready = 0;
  for (i = 0; i < m->handler.pfdcount && ready < num ; ++i)
    {
      /* no event for the current fd? continue immediately */
      if(m->handler.pfds[i].revents == 0)
        continue;

      ready++;

      /* POLLIN / POLLOUT: schedule the matching read/write thread */
      if (m->handler.pfds[i].revents & POLLIN)
        thread_process_fds_helper(m, m->read[m->handler.pfds[i].fd], NULL, POLLIN, i);
      if (m->handler.pfds[i].revents & POLLOUT)
        thread_process_fds_helper(m, m->write[m->handler.pfds[i].fd], NULL, POLLOUT, i);

      /* remove fd from list on POLLNVAL (or POLLHUP) by compacting the
       * array over the dead slot.
       *
       * NOTE(review): the memmove length is derived from pfdsize (the
       * allocated capacity) rather than pfdcount (the live entries).
       * This stays within the array, and also shifts the SNMP tail
       * between pfdcount and pfdcountsnmp — but pfdcountsnmp itself is
       * not decremented here; confirm the intended interaction with
       * add_snmp_pollfds(). */
      if (m->handler.pfds[i].revents & POLLNVAL ||
          m->handler.pfds[i].revents & POLLHUP)
        {
          memmove(m->handler.pfds+i,
                  m->handler.pfds+i+1,
                  (m->handler.pfdsize-i-1) * sizeof(struct pollfd));
          m->handler.pfdcount--;
          i--;    /* re-examine the entry that slid into slot i */
        }
      else
        m->handler.pfds[i].revents = 0;
    }
}
1214#endif
1215
/* Dispatch the I/O events reported by fd_select(): move each ready
 * descriptor's read/write thread onto the master's ready list.  'num'
 * caps the scan at the number of events the kernel reported.
 *
 * NOTE(review): only the read and write sets are examined here; the
 * exception set passed to fd_select() in thread_fetch() is never
 * processed — confirm that is intentional. */
static void
thread_process_fds (struct thread_master *m, thread_fd_set *rset, thread_fd_set *wset, int num)
{
#if defined (HAVE_POLL)
  /* poll backend: events live in m->handler.pfds; rset is unused. */
  check_pollfds (m, rset, num);
#else
  int ready = 0, index;

  for (index = 0; index < m->fd_limit && ready < num; ++index)
    {
      ready += thread_process_fds_helper (m, m->read[index], rset, 0, 0);
      ready += thread_process_fds_helper (m, m->write[index], wset, 0, 0);
    }
#endif
}
1231
8b70d0b0 1232/* Add all timers that have popped to the ready list. */
a48b4e6d 1233static unsigned int
4becea72 1234thread_timer_process (struct pqueue *queue, struct timeval *timenow)
a48b4e6d 1235{
1236 struct thread *thread;
1237 unsigned int ready = 0;
1238
4becea72 1239 while (queue->size)
8b70d0b0 1240 {
4becea72 1241 thread = queue->array[0];
8b70d0b0 1242 if (timeval_cmp (*timenow, thread->u.sands) < 0)
1243 return ready;
4becea72 1244 pqueue_dequeue(queue);
8b70d0b0 1245 thread->type = THREAD_READY;
1246 thread_list_add (&thread->master->ready, thread);
1247 ready++;
1248 }
a48b4e6d 1249 return ready;
1250}
1251
2613abe6
PJ
1252/* process a list en masse, e.g. for event thread lists */
1253static unsigned int
1254thread_process (struct thread_list *list)
1255{
1256 struct thread *thread;
b5043aab 1257 struct thread *next;
2613abe6
PJ
1258 unsigned int ready = 0;
1259
b5043aab 1260 for (thread = list->head; thread; thread = next)
2613abe6 1261 {
b5043aab 1262 next = thread->next;
2613abe6
PJ
1263 thread_list_delete (list, thread);
1264 thread->type = THREAD_READY;
1265 thread_list_add (&thread->master->ready, thread);
1266 ready++;
1267 }
1268 return ready;
1269}
1270
1271
718e3744 1272/* Fetch next ready thread. */
1273struct thread *
1274thread_fetch (struct thread_master *m, struct thread *fetch)
1275{
718e3744 1276 struct thread *thread;
209a72a6
DS
1277 thread_fd_set readfd;
1278 thread_fd_set writefd;
1279 thread_fd_set exceptfd;
2613abe6 1280 struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
a48b4e6d 1281 struct timeval timer_val_bg;
2613abe6 1282 struct timeval *timer_wait = &timer_val;
a48b4e6d 1283 struct timeval *timer_wait_bg;
718e3744 1284
1285 while (1)
1286 {
a48b4e6d 1287 int num = 0;
56e2c5e8 1288
2613abe6 1289 /* Signals pre-empt everything */
05c447dd 1290 quagga_sigevent_process ();
1291
2613abe6
PJ
1292 /* Drain the ready queue of already scheduled jobs, before scheduling
1293 * more.
a48b4e6d 1294 */
718e3744 1295 if ((thread = thread_trim_head (&m->ready)) != NULL)
05c447dd 1296 return thread_run (m, thread, fetch);
a48b4e6d 1297
2613abe6
PJ
1298 /* To be fair to all kinds of threads, and avoid starvation, we
1299 * need to be careful to consider all thread types for scheduling
1300 * in each quanta. I.e. we should not return early from here on.
1301 */
1302
1303 /* Normal event are the next highest priority. */
1304 thread_process (&m->event);
1305
718e3744 1306 /* Structure copy. */
0a95a0d0
DS
1307#if !defined(HAVE_POLL)
1308 readfd = fd_copy_fd_set(m->handler.readfd);
1309 writefd = fd_copy_fd_set(m->handler.writefd);
1310 exceptfd = fd_copy_fd_set(m->handler.exceptfd);
1311#endif
a48b4e6d 1312
1313 /* Calculate select wait timer if nothing else to do */
2613abe6
PJ
1314 if (m->ready.count == 0)
1315 {
1316 quagga_get_relative (NULL);
4becea72
CF
1317 timer_wait = thread_timer_wait (m->timer, &timer_val);
1318 timer_wait_bg = thread_timer_wait (m->background, &timer_val_bg);
2613abe6
PJ
1319
1320 if (timer_wait_bg &&
1321 (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
1322 timer_wait = timer_wait_bg;
1323 }
56e2c5e8 1324
0a95a0d0 1325 num = fd_select (m, FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);
a48b4e6d 1326
1327 /* Signals should get quick treatment */
718e3744 1328 if (num < 0)
05c447dd 1329 {
1330 if (errno == EINTR)
a48b4e6d 1331 continue; /* signal received - process it */
6099b3b5 1332 zlog_warn ("select() error: %s", safe_strerror (errno));
5d4ccd4e 1333 return NULL;
05c447dd 1334 }
8b70d0b0 1335
1336 /* Check foreground timers. Historically, they have had higher
1337 priority than I/O threads, so let's push them onto the ready
1338 list in front of the I/O threads. */
db9c0df9 1339 quagga_get_relative (NULL);
4becea72 1340 thread_timer_process (m->timer, &relative_time);
a48b4e6d 1341
1342 /* Got IO, process it */
1343 if (num > 0)
5d4ccd4e 1344 thread_process_fds (m, &readfd, &writefd, num);
8b70d0b0 1345
1346#if 0
1347 /* If any threads were made ready above (I/O or foreground timer),
1348 perhaps we should avoid adding background timers to the ready
1349 list at this time. If this is code is uncommented, then background
1350 timer threads will not run unless there is nothing else to do. */
1351 if ((thread = thread_trim_head (&m->ready)) != NULL)
1352 return thread_run (m, thread, fetch);
1353#endif
1354
a48b4e6d 1355 /* Background timer/events, lowest priority */
4becea72 1356 thread_timer_process (m->background, &relative_time);
a48b4e6d 1357
8b70d0b0 1358 if ((thread = thread_trim_head (&m->ready)) != NULL)
05c447dd 1359 return thread_run (m, thread, fetch);
718e3744 1360 }
1361}
1362
924b9229 1363unsigned long
8b70d0b0 1364thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
718e3744 1365{
718e3744 1366 /* This is 'user + sys' time. */
8b70d0b0 1367 *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
1368 timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
8b70d0b0 1369 return timeval_elapsed (now->real, start->real);
1370}
1371
50596be0
DS
1372/* We should aim to yield after yield milliseconds, which defaults
1373 to THREAD_YIELD_TIME_SLOT .
8b70d0b0 1374 Note: we are using real (wall clock) time for this calculation.
1375 It could be argued that CPU time may make more sense in certain
1376 contexts. The things to consider are whether the thread may have
1377 blocked (in which case wall time increases, but CPU time does not),
1378 or whether the system is heavily loaded with other processes competing
1379 for CPU time. On balance, wall clock time seems to make sense.
1380 Plus it has the added benefit that gettimeofday should be faster
1381 than calling getrusage. */
718e3744 1382int
1383thread_should_yield (struct thread *thread)
1384{
db9c0df9 1385 quagga_get_relative (NULL);
41af338e 1386 return (timeval_elapsed(relative_time, thread->real) >
50596be0
DS
1387 thread->yield);
1388}
1389
1390void
1391thread_set_yield_time (struct thread *thread, unsigned long yield_time)
1392{
1393 thread->yield = yield_time;
718e3744 1394}
1395
/* Snapshot process resource usage plus the relative wall clock into
 * *r.  Used (via the GETRUSAGE macro) to bracket thread execution for
 * the CPU-history statistics in thread_call(). */
void
thread_getrusage (RUSAGE_T *r)
{
  quagga_get_relative (NULL);          /* refresh relative_time first */
  getrusage(RUSAGE_SELF, &(r->cpu));
  r->real = relative_time;

#ifdef HAVE_CLOCK_MONOTONIC
  /* quagga_get_relative() only updates recent_time if gettimeofday
   * based, not when using CLOCK_MONOTONIC. As we export recent_time
   * and guarantee to update it before threads are run...
   */
  quagga_gettimeofday(&recent_time);
#endif /* HAVE_CLOCK_MONOTONIC */
}
1411
d1265948
DL
/* The thread currently executing, set/cleared around the callback in
 * thread_call(); NULL when no thread callback is running. */
struct thread *thread_current = NULL;
1413
718e3744 1414/* We check thread consumed time. If the system has getrusage, we'll
8b70d0b0 1415 use that to get in-depth stats on the performance of the thread in addition
1416 to wall clock time stats from gettimeofday. */
718e3744 1417void
1418thread_call (struct thread *thread)
1419{
8b70d0b0 1420 unsigned long realtime, cputime;
41af338e 1421 RUSAGE_T before, after;
cc8b13a0
PJ
1422
1423 /* Cache a pointer to the relevant cpu history thread, if the thread
1424 * does not have it yet.
1425 *
1426 * Callers submitting 'dummy threads' hence must take care that
1427 * thread->cpu is NULL
1428 */
1429 if (!thread->hist)
1430 {
1431 struct cpu_thread_history tmp;
1432
1433 tmp.func = thread->func;
9c7753e4 1434 tmp.funcname = thread->funcname;
cc8b13a0
PJ
1435
1436 thread->hist = hash_get (cpu_record, &tmp,
1437 (void * (*) (void *))cpu_record_hash_alloc);
1438 }
718e3744 1439
41af338e
JBD
1440 GETRUSAGE (&before);
1441 thread->real = before.real;
718e3744 1442
d1265948 1443 thread_current = thread;
718e3744 1444 (*thread->func) (thread);
d1265948 1445 thread_current = NULL;
718e3744 1446
41af338e 1447 GETRUSAGE (&after);
718e3744 1448
41af338e 1449 realtime = thread_consumed_time (&after, &before, &cputime);
cc8b13a0
PJ
1450 thread->hist->real.total += realtime;
1451 if (thread->hist->real.max < realtime)
1452 thread->hist->real.max = realtime;
cc8b13a0
PJ
1453 thread->hist->cpu.total += cputime;
1454 if (thread->hist->cpu.max < cputime)
1455 thread->hist->cpu.max = cputime;
e04ab74d 1456
cc8b13a0
PJ
1457 ++(thread->hist->total_calls);
1458 thread->hist->types |= (1 << thread->add_type);
718e3744 1459
924b9229 1460#ifdef CONSUMED_TIME_CHECK
8b70d0b0 1461 if (realtime > CONSUMED_TIME_CHECK)
718e3744 1462 {
1463 /*
1464 * We have a CPU Hog on our hands.
1465 * Whinge about it now, so we're aware this is yet another task
1466 * to fix.
1467 */
8b70d0b0 1468 zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
924b9229 1469 thread->funcname,
1470 (unsigned long) thread->func,
8b70d0b0 1471 realtime/1000, cputime/1000);
718e3744 1472 }
924b9229 1473#endif /* CONSUMED_TIME_CHECK */
718e3744 1474}
1475
1476/* Execute thread */
1477struct thread *
e04ab74d 1478funcname_thread_execute (struct thread_master *m,
718e3744 1479 int (*func)(struct thread *),
1480 void *arg,
e04ab74d 1481 int val,
9c7753e4 1482 debugargdef)
718e3744 1483{
1484 struct thread dummy;
1485
1486 memset (&dummy, 0, sizeof (struct thread));
1487
1488 dummy.type = THREAD_EVENT;
e04ab74d 1489 dummy.add_type = THREAD_EXECUTE;
718e3744 1490 dummy.master = NULL;
1491 dummy.func = func;
1492 dummy.arg = arg;
1493 dummy.u.val = val;
9c7753e4
DL
1494
1495 dummy.funcname = funcname;
1496 dummy.schedfrom = schedfrom;
1497 dummy.schedfrom_line = fromln;
1498
718e3744 1499 thread_call (&dummy);
1500
1501 return NULL;
1502}