]> git.proxmox.com Git - mirror_frr.git/blame - lib/thread.c
isisd, vtysh: Fix isis routemaps
[mirror_frr.git] / lib / thread.c
CommitLineData
718e3744 1/* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 */
21
22/* #define DEBUG */
23
24#include <zebra.h>
308d14ae 25#include <sys/resource.h>
718e3744 26
27#include "thread.h"
28#include "memory.h"
29#include "log.h"
e04ab74d 30#include "hash.h"
4becea72 31#include "pqueue.h"
e04ab74d 32#include "command.h"
05c447dd 33#include "sigevent.h"
d6be5fb9 34
3b96b781
HT
35#if defined(__APPLE__)
36#include <mach/mach.h>
37#include <mach/mach_time.h>
38#endif
39
db9c0df9 40/* Recent absolute time of day */
8b70d0b0 41struct timeval recent_time;
db9c0df9
PJ
42/* Relative time, since startup */
43static struct timeval relative_time;
6b0655a2 44
e04ab74d 45static struct hash *cpu_record = NULL;
6b0655a2 46
8b70d0b0 47/* Adjust so that tv_usec is in the range [0,TIMER_SECOND_MICRO).
48 And change negative values to 0. */
a48b4e6d 49static struct timeval
718e3744 50timeval_adjust (struct timeval a)
51{
52 while (a.tv_usec >= TIMER_SECOND_MICRO)
53 {
54 a.tv_usec -= TIMER_SECOND_MICRO;
55 a.tv_sec++;
56 }
57
58 while (a.tv_usec < 0)
59 {
60 a.tv_usec += TIMER_SECOND_MICRO;
61 a.tv_sec--;
62 }
63
64 if (a.tv_sec < 0)
8b70d0b0 65 /* Change negative timeouts to 0. */
66 a.tv_sec = a.tv_usec = 0;
718e3744 67
68 return a;
69}
70
71static struct timeval
72timeval_subtract (struct timeval a, struct timeval b)
73{
74 struct timeval ret;
75
76 ret.tv_usec = a.tv_usec - b.tv_usec;
77 ret.tv_sec = a.tv_sec - b.tv_sec;
78
79 return timeval_adjust (ret);
80}
81
/* Three-way compare: negative when a < b, zero when equal,
   positive when a > b.  Seconds dominate; microseconds break ties. */
static long
timeval_cmp (struct timeval a, struct timeval b)
{
  if (a.tv_sec != b.tv_sec)
    return a.tv_sec - b.tv_sec;
  return a.tv_usec - b.tv_usec;
}
88
cf744958 89unsigned long
718e3744 90timeval_elapsed (struct timeval a, struct timeval b)
91{
92 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
93 + (a.tv_usec - b.tv_usec));
94}
6b0655a2 95
db9c0df9
PJ
96/* gettimeofday wrapper, to keep recent_time updated */
97static int
98quagga_gettimeofday (struct timeval *tv)
99{
100 int ret;
101
102 assert (tv);
103
104 if (!(ret = gettimeofday (&recent_time, NULL)))
105 {
db9c0df9
PJ
106 /* avoid copy if user passed recent_time pointer.. */
107 if (tv != &recent_time)
108 *tv = recent_time;
109 return 0;
110 }
111 return ret;
112}
113
114static int
115quagga_get_relative (struct timeval *tv)
116{
117 int ret;
118
119#ifdef HAVE_CLOCK_MONOTONIC
120 {
121 struct timespec tp;
122 if (!(ret = clock_gettime (CLOCK_MONOTONIC, &tp)))
123 {
124 relative_time.tv_sec = tp.tv_sec;
125 relative_time.tv_usec = tp.tv_nsec / 1000;
126 }
127 }
3b96b781
HT
128#elif defined(__APPLE__)
129 {
130 uint64_t ticks;
131 uint64_t useconds;
132 static mach_timebase_info_data_t timebase_info;
133
134 ticks = mach_absolute_time();
135 if (timebase_info.denom == 0)
136 mach_timebase_info(&timebase_info);
137
138 useconds = ticks * timebase_info.numer / timebase_info.denom / 1000;
139 relative_time.tv_sec = useconds / 1000000;
140 relative_time.tv_usec = useconds % 1000000;
141
142 return 0;
143 }
144#else /* !HAVE_CLOCK_MONOTONIC && !__APPLE__ */
16f5949d 145#error no monotonic clock on this system
db9c0df9
PJ
146#endif /* HAVE_CLOCK_MONOTONIC */
147
148 if (tv)
149 *tv = relative_time;
150
151 return ret;
152}
153
db9c0df9
PJ
154/* Exported Quagga timestamp function.
155 * Modelled on POSIX clock_gettime.
156 */
157int
158quagga_gettime (enum quagga_clkid clkid, struct timeval *tv)
159{
160 switch (clkid)
161 {
db9c0df9
PJ
162 case QUAGGA_CLK_MONOTONIC:
163 return quagga_get_relative (tv);
db9c0df9
PJ
164 default:
165 errno = EINVAL;
166 return -1;
167 }
168}
169
a05d8b7a
DL
/* Whole seconds on the monotonic clock. */
time_t
quagga_monotime (void)
{
  struct timeval now;

  quagga_get_relative (&now);
  return now.tv_sec;
}
177
db9c0df9
PJ
178/* Public export of recent_relative_time by value */
179struct timeval
180recent_relative_time (void)
181{
182 return relative_time;
183}
6b0655a2 184
a48b4e6d 185static unsigned int
e04ab74d 186cpu_record_hash_key (struct cpu_thread_history *a)
187{
8cc4198f 188 return (uintptr_t) a->func;
e04ab74d 189}
190
191static int
ffe11cfb
SH
192cpu_record_hash_cmp (const struct cpu_thread_history *a,
193 const struct cpu_thread_history *b)
e04ab74d 194{
195 return a->func == b->func;
196}
197
8cc4198f 198static void *
e04ab74d 199cpu_record_hash_alloc (struct cpu_thread_history *a)
200{
201 struct cpu_thread_history *new;
039b9577 202 new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
e04ab74d 203 new->func = a->func;
9c7753e4 204 new->funcname = a->funcname;
e04ab74d 205 return new;
206}
207
228da428
CC
208static void
209cpu_record_hash_free (void *a)
210{
211 struct cpu_thread_history *hist = a;
212
228da428
CC
213 XFREE (MTYPE_THREAD_STATS, hist);
214}
215
f63f06da 216static void
e04ab74d 217vty_out_cpu_thread_history(struct vty* vty,
218 struct cpu_thread_history *a)
219{
8b70d0b0 220#ifdef HAVE_RUSAGE
221 vty_out(vty, "%7ld.%03ld %9d %8ld %9ld %8ld %9ld",
222 a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
223 a->cpu.total/a->total_calls, a->cpu.max,
224 a->real.total/a->total_calls, a->real.max);
225#else
226 vty_out(vty, "%7ld.%03ld %9d %8ld %9ld",
227 a->real.total/1000, a->real.total%1000, a->total_calls,
228 a->real.total/a->total_calls, a->real.max);
229#endif
230 vty_out(vty, " %c%c%c%c%c%c %s%s",
e04ab74d 231 a->types & (1 << THREAD_READ) ? 'R':' ',
232 a->types & (1 << THREAD_WRITE) ? 'W':' ',
233 a->types & (1 << THREAD_TIMER) ? 'T':' ',
234 a->types & (1 << THREAD_EVENT) ? 'E':' ',
235 a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
a48b4e6d 236 a->types & (1 << THREAD_BACKGROUND) ? 'B' : ' ',
e04ab74d 237 a->funcname, VTY_NEWLINE);
238}
239
240static void
241cpu_record_hash_print(struct hash_backet *bucket,
242 void *args[])
243{
244 struct cpu_thread_history *totals = args[0];
245 struct vty *vty = args[1];
41b2373c 246 thread_type *filter = args[2];
e04ab74d 247 struct cpu_thread_history *a = bucket->data;
f48f65d2 248
e04ab74d 249 if ( !(a->types & *filter) )
250 return;
251 vty_out_cpu_thread_history(vty,a);
e04ab74d 252 totals->total_calls += a->total_calls;
8b70d0b0 253 totals->real.total += a->real.total;
254 if (totals->real.max < a->real.max)
255 totals->real.max = a->real.max;
256#ifdef HAVE_RUSAGE
257 totals->cpu.total += a->cpu.total;
258 if (totals->cpu.max < a->cpu.max)
259 totals->cpu.max = a->cpu.max;
260#endif
e04ab74d 261}
262
263static void
41b2373c 264cpu_record_print(struct vty *vty, thread_type filter)
e04ab74d 265{
266 struct cpu_thread_history tmp;
267 void *args[3] = {&tmp, vty, &filter};
268
269 memset(&tmp, 0, sizeof tmp);
9c7753e4 270 tmp.funcname = "TOTAL";
e04ab74d 271 tmp.types = filter;
272
8b70d0b0 273#ifdef HAVE_RUSAGE
274 vty_out(vty, "%21s %18s %18s%s",
275 "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
276#endif
277 vty_out(vty, "Runtime(ms) Invoked Avg uSec Max uSecs");
278#ifdef HAVE_RUSAGE
279 vty_out(vty, " Avg uSec Max uSecs");
280#endif
281 vty_out(vty, " Type Thread%s", VTY_NEWLINE);
e04ab74d 282 hash_iterate(cpu_record,
283 (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
284 args);
285
286 if (tmp.total_calls > 0)
287 vty_out_cpu_thread_history(vty, &tmp);
288}
289
290DEFUN(show_thread_cpu,
291 show_thread_cpu_cmd,
292 "show thread cpu [FILTER]",
293 SHOW_STR
294 "Thread information\n"
295 "Thread CPU usage\n"
a48b4e6d 296 "Display filter (rwtexb)\n")
e04ab74d 297{
298 int i = 0;
41b2373c 299 thread_type filter = (thread_type) -1U;
e04ab74d 300
301 if (argc > 0)
302 {
303 filter = 0;
304 while (argv[0][i] != '\0')
305 {
306 switch ( argv[0][i] )
307 {
308 case 'r':
309 case 'R':
310 filter |= (1 << THREAD_READ);
311 break;
312 case 'w':
313 case 'W':
314 filter |= (1 << THREAD_WRITE);
315 break;
316 case 't':
317 case 'T':
318 filter |= (1 << THREAD_TIMER);
319 break;
320 case 'e':
321 case 'E':
322 filter |= (1 << THREAD_EVENT);
323 break;
324 case 'x':
325 case 'X':
326 filter |= (1 << THREAD_EXECUTE);
327 break;
a48b4e6d 328 case 'b':
329 case 'B':
330 filter |= (1 << THREAD_BACKGROUND);
331 break;
e04ab74d 332 default:
333 break;
334 }
335 ++i;
336 }
337 if (filter == 0)
338 {
a48b4e6d 339 vty_out(vty, "Invalid filter \"%s\" specified,"
340 " must contain at least one of 'RWTEXB'%s",
e04ab74d 341 argv[0], VTY_NEWLINE);
342 return CMD_WARNING;
343 }
344 }
345
346 cpu_record_print(vty, filter);
347 return CMD_SUCCESS;
348}
e276eb82
PJ
349
350static void
351cpu_record_hash_clear (struct hash_backet *bucket,
352 void *args)
353{
354 thread_type *filter = args;
355 struct cpu_thread_history *a = bucket->data;
f48f65d2 356
e276eb82
PJ
357 if ( !(a->types & *filter) )
358 return;
359
360 hash_release (cpu_record, bucket->data);
361}
362
363static void
364cpu_record_clear (thread_type filter)
365{
366 thread_type *tmp = &filter;
367 hash_iterate (cpu_record,
368 (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
369 tmp);
370}
371
372DEFUN(clear_thread_cpu,
373 clear_thread_cpu_cmd,
374 "clear thread cpu [FILTER]",
375 "Clear stored data\n"
376 "Thread information\n"
377 "Thread CPU usage\n"
378 "Display filter (rwtexb)\n")
379{
380 int i = 0;
381 thread_type filter = (thread_type) -1U;
382
383 if (argc > 0)
384 {
385 filter = 0;
386 while (argv[0][i] != '\0')
387 {
388 switch ( argv[0][i] )
389 {
390 case 'r':
391 case 'R':
392 filter |= (1 << THREAD_READ);
393 break;
394 case 'w':
395 case 'W':
396 filter |= (1 << THREAD_WRITE);
397 break;
398 case 't':
399 case 'T':
400 filter |= (1 << THREAD_TIMER);
401 break;
402 case 'e':
403 case 'E':
404 filter |= (1 << THREAD_EVENT);
405 break;
406 case 'x':
407 case 'X':
408 filter |= (1 << THREAD_EXECUTE);
409 break;
410 case 'b':
411 case 'B':
412 filter |= (1 << THREAD_BACKGROUND);
413 break;
414 default:
415 break;
416 }
417 ++i;
418 }
419 if (filter == 0)
420 {
421 vty_out(vty, "Invalid filter \"%s\" specified,"
422 " must contain at least one of 'RWTEXB'%s",
423 argv[0], VTY_NEWLINE);
424 return CMD_WARNING;
425 }
426 }
427
428 cpu_record_clear (filter);
429 return CMD_SUCCESS;
430}
6b0655a2 431
4becea72
CF
432static int
433thread_timer_cmp(void *a, void *b)
434{
435 struct thread *thread_a = a;
436 struct thread *thread_b = b;
437
438 long cmp = timeval_cmp(thread_a->u.sands, thread_b->u.sands);
439
440 if (cmp < 0)
441 return -1;
442 if (cmp > 0)
443 return 1;
444 return 0;
445}
446
447static void
448thread_timer_update(void *node, int actual_position)
449{
450 struct thread *thread = node;
451
452 thread->index = actual_position;
453}
454
718e3744 455/* Allocate new thread master. */
456struct thread_master *
0a95a0d0 457thread_master_create (void)
718e3744 458{
4becea72 459 struct thread_master *rv;
308d14ae
DV
460 struct rlimit limit;
461
462 getrlimit(RLIMIT_NOFILE, &limit);
4becea72 463
e04ab74d 464 if (cpu_record == NULL)
8cc4198f 465 cpu_record
90645f55
SH
466 = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
467 (int (*) (const void *, const void *))cpu_record_hash_cmp);
4becea72
CF
468
469 rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
308d14ae
DV
470 if (rv == NULL)
471 {
472 return NULL;
473 }
474
475 rv->fd_limit = (int)limit.rlim_cur;
476 rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
477 if (rv->read == NULL)
478 {
479 XFREE (MTYPE_THREAD_MASTER, rv);
480 return NULL;
481 }
482
483 rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
484 if (rv->write == NULL)
485 {
486 XFREE (MTYPE_THREAD, rv->read);
487 XFREE (MTYPE_THREAD_MASTER, rv);
488 return NULL;
489 }
4becea72
CF
490
491 /* Initialize the timer queues */
492 rv->timer = pqueue_create();
493 rv->background = pqueue_create();
494 rv->timer->cmp = rv->background->cmp = thread_timer_cmp;
495 rv->timer->update = rv->background->update = thread_timer_update;
496
0a95a0d0 497#if defined(HAVE_POLL)
b53e10a1 498 rv->handler.pfdsize = rv->fd_limit;
0a95a0d0
DS
499 rv->handler.pfdcount = 0;
500 rv->handler.pfds = (struct pollfd *) malloc (sizeof (struct pollfd) * rv->handler.pfdsize);
501 memset (rv->handler.pfds, 0, sizeof (struct pollfd) * rv->handler.pfdsize);
502#endif
4becea72 503 return rv;
718e3744 504}
505
506/* Add a new thread to the list. */
507static void
508thread_list_add (struct thread_list *list, struct thread *thread)
509{
510 thread->next = NULL;
511 thread->prev = list->tail;
512 if (list->tail)
513 list->tail->next = thread;
514 else
515 list->head = thread;
516 list->tail = thread;
517 list->count++;
518}
519
718e3744 520/* Delete a thread from the list. */
521static struct thread *
522thread_list_delete (struct thread_list *list, struct thread *thread)
523{
524 if (thread->next)
525 thread->next->prev = thread->prev;
526 else
527 list->tail = thread->prev;
528 if (thread->prev)
529 thread->prev->next = thread->next;
530 else
531 list->head = thread->next;
532 thread->next = thread->prev = NULL;
533 list->count--;
534 return thread;
535}
536
308d14ae
DV
537static void
538thread_delete_fd (struct thread **thread_array, struct thread *thread)
539{
540 thread_array[thread->u.fd] = NULL;
541}
542
543static void
544thread_add_fd (struct thread **thread_array, struct thread *thread)
545{
546 thread_array[thread->u.fd] = thread;
547}
548
495f0b13
DS
549/* Thread list is empty or not. */
550static int
551thread_empty (struct thread_list *list)
552{
553 return list->head ? 0 : 1;
554}
555
556/* Delete top of the list and return it. */
557static struct thread *
558thread_trim_head (struct thread_list *list)
559{
560 if (!thread_empty (list))
561 return thread_list_delete (list, list->head);
562 return NULL;
563}
564
718e3744 565/* Move thread to unuse list. */
566static void
567thread_add_unuse (struct thread_master *m, struct thread *thread)
568{
a48b4e6d 569 assert (m != NULL && thread != NULL);
718e3744 570 assert (thread->next == NULL);
571 assert (thread->prev == NULL);
572 assert (thread->type == THREAD_UNUSED);
573 thread_list_add (&m->unuse, thread);
574}
575
576/* Free all unused thread. */
577static void
578thread_list_free (struct thread_master *m, struct thread_list *list)
579{
580 struct thread *t;
581 struct thread *next;
582
583 for (t = list->head; t; t = next)
584 {
585 next = t->next;
586 XFREE (MTYPE_THREAD, t);
587 list->count--;
588 m->alloc--;
589 }
590}
591
308d14ae
DV
592static void
593thread_array_free (struct thread_master *m, struct thread **thread_array)
594{
595 struct thread *t;
596 int index;
597
598 for (index = 0; index < m->fd_limit; ++index)
599 {
600 t = thread_array[index];
601 if (t)
602 {
603 thread_array[index] = NULL;
604 XFREE (MTYPE_THREAD, t);
605 m->alloc--;
606 }
607 }
608 XFREE (MTYPE_THREAD, thread_array);
609}
610
4becea72
CF
611static void
612thread_queue_free (struct thread_master *m, struct pqueue *queue)
613{
614 int i;
615
616 for (i = 0; i < queue->size; i++)
617 XFREE(MTYPE_THREAD, queue->array[i]);
618
619 m->alloc -= queue->size;
620 pqueue_delete(queue);
621}
622
495f0b13
DS
623/*
624 * thread_master_free_unused
625 *
626 * As threads are finished with they are put on the
627 * unuse list for later reuse.
628 * If we are shutting down, Free up unused threads
629 * So we can see if we forget to shut anything off
630 */
631void
632thread_master_free_unused (struct thread_master *m)
633{
634 struct thread *t;
635 while ((t = thread_trim_head(&m->unuse)) != NULL)
636 {
637 XFREE(MTYPE_THREAD, t);
638 }
639}
640
718e3744 641/* Stop thread scheduler. */
642void
643thread_master_free (struct thread_master *m)
644{
308d14ae
DV
645 thread_array_free (m, m->read);
646 thread_array_free (m, m->write);
4becea72 647 thread_queue_free (m, m->timer);
718e3744 648 thread_list_free (m, &m->event);
649 thread_list_free (m, &m->ready);
650 thread_list_free (m, &m->unuse);
4becea72 651 thread_queue_free (m, m->background);
0a95a0d0
DS
652
653#if defined(HAVE_POLL)
654 XFREE (MTYPE_THREAD_MASTER, m->handler.pfds);
655#endif
718e3744 656 XFREE (MTYPE_THREAD_MASTER, m);
228da428
CC
657
658 if (cpu_record)
659 {
660 hash_clean (cpu_record, cpu_record_hash_free);
661 hash_free (cpu_record);
662 cpu_record = NULL;
663 }
718e3744 664}
665
718e3744 666/* Return remain time in second. */
667unsigned long
668thread_timer_remain_second (struct thread *thread)
669{
db9c0df9
PJ
670 quagga_get_relative (NULL);
671
672 if (thread->u.sands.tv_sec - relative_time.tv_sec > 0)
673 return thread->u.sands.tv_sec - relative_time.tv_sec;
718e3744 674 else
675 return 0;
676}
677
9c7753e4
DL
678#define debugargdef const char *funcname, const char *schedfrom, int fromln
679#define debugargpass funcname, schedfrom, fromln
e04ab74d 680
6ac44687
CF
681struct timeval
682thread_timer_remain(struct thread *thread)
683{
684 quagga_get_relative(NULL);
685
686 return timeval_subtract(thread->u.sands, relative_time);
687}
688
718e3744 689/* Get new thread. */
690static struct thread *
691thread_get (struct thread_master *m, u_char type,
9c7753e4 692 int (*func) (struct thread *), void *arg, debugargdef)
718e3744 693{
64018324 694 struct thread *thread = thread_trim_head (&m->unuse);
718e3744 695
22714f99 696 if (! thread)
718e3744 697 {
698 thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
699 m->alloc++;
700 }
701 thread->type = type;
e04ab74d 702 thread->add_type = type;
718e3744 703 thread->master = m;
704 thread->func = func;
705 thread->arg = arg;
4becea72 706 thread->index = -1;
50596be0 707 thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
4becea72 708
9c7753e4
DL
709 thread->funcname = funcname;
710 thread->schedfrom = schedfrom;
711 thread->schedfrom_line = fromln;
e04ab74d 712
718e3744 713 return thread;
714}
715
aa037235
DS
#if defined (HAVE_POLL)

#define fd_copy_fd_set(X) (X)

/* Shared poll-backend scheduler for read/write threads: register the
   fd (reusing its pollfd slot when one already exists) and allocate
   the thread object. */
static struct thread *
generic_thread_add(struct thread_master *m, int (*func) (struct thread *),
                   void *arg, int fd, int dir, debugargdef)
{
  struct thread *thread;
  u_char type;
  short int event;
  nfds_t queuepos;
  nfds_t i;

  if (dir == THREAD_READ)
    {
      event = (POLLIN | POLLHUP);
      type = THREAD_READ;
    }
  else
    {
      event = (POLLOUT | POLLHUP);
      type = THREAD_WRITE;
    }

  /* Reuse the existing slot if this fd is already being polled,
     otherwise append at the end. */
  queuepos = m->handler.pfdcount;
  for (i = 0; i < m->handler.pfdcount; i++)
    if (m->handler.pfds[i].fd == fd)
      {
        queuepos = i;
        break;
      }

  /* is there enough space for a new fd? */
  assert (queuepos < m->handler.pfdsize);

  thread = thread_get (m, type, func, arg, debugargpass);
  m->handler.pfds[queuepos].fd = fd;
  m->handler.pfds[queuepos].events |= event;
  if (queuepos == m->handler.pfdcount)
    m->handler.pfdcount++;

  return thread;
}
#else

#define fd_copy_fd_set(X) (X)
#endif
cc7165b6 765
209a72a6 766static int
0a95a0d0 767fd_select (struct thread_master *m, int size, thread_fd_set *read, thread_fd_set *write, thread_fd_set *except, struct timeval *timer_wait)
209a72a6 768{
0a95a0d0
DS
769 int num;
770#if defined(HAVE_POLL)
771 /* recalc timeout for poll. Attention NULL pointer is no timeout with
772 select, where with poll no timeount is -1 */
773 int timeout = -1;
774 if (timer_wait != NULL)
775 timeout = (timer_wait->tv_sec*1000) + (timer_wait->tv_usec/1000);
776
777 num = poll (m->handler.pfds, m->handler.pfdcount + m->handler.pfdcountsnmp, timeout);
778#else
779 num = select (size, read, write, except, timer_wait);
780#endif
781
782 return num;
209a72a6
DS
783}
784
785static int
0a95a0d0 786fd_is_set (struct thread *thread, thread_fd_set *fdset, int pos)
209a72a6 787{
0a95a0d0
DS
788#if defined(HAVE_POLL)
789 return 1;
790#else
791 return FD_ISSET (THREAD_FD (thread), fdset);
792#endif
209a72a6
DS
793}
794
209a72a6 795static int
0a95a0d0 796fd_clear_read_write (struct thread *thread)
209a72a6 797{
0a95a0d0
DS
798#if !defined(HAVE_POLL)
799 thread_fd_set *fdset = NULL;
800 int fd = THREAD_FD (thread);
801
802 if (thread->type == THREAD_READ)
803 fdset = &thread->master->handler.readfd;
804 else
805 fdset = &thread->master->handler.writefd;
806
209a72a6
DS
807 if (!FD_ISSET (fd, fdset))
808 return 0;
809
810 FD_CLR (fd, fdset);
0a95a0d0 811#endif
209a72a6
DS
812 return 1;
813}
814
718e3744 815/* Add new read thread. */
816struct thread *
8dadcae7 817funcname_thread_add_read_write (int dir, struct thread_master *m,
9c7753e4
DL
818 int (*func) (struct thread *), void *arg, int fd,
819 debugargdef)
718e3744 820{
8dadcae7 821 struct thread *thread = NULL;
718e3744 822
aa037235
DS
823#if !defined(HAVE_POLL)
824 thread_fd_set *fdset = NULL;
825 if (dir == THREAD_READ)
826 fdset = &m->handler.readfd;
827 else
828 fdset = &m->handler.writefd;
829#endif
830
831#if defined (HAVE_POLL)
9c7753e4 832 thread = generic_thread_add(m, func, arg, fd, dir, debugargpass);
aa037235
DS
833
834 if (thread == NULL)
835 return NULL;
836#else
837 if (FD_ISSET (fd, fdset))
838 {
839 zlog (NULL, LOG_WARNING, "There is already %s fd [%d]", (dir = THREAD_READ) ? "read" : "write", fd);
840 return NULL;
841 }
842
843 FD_SET (fd, fdset);
9c7753e4 844 thread = thread_get (m, dir, func, arg, debugargpass);
aa037235 845#endif
0a95a0d0 846
718e3744 847 thread->u.fd = fd;
8dadcae7
DS
848 if (dir == THREAD_READ)
849 thread_add_fd (m->read, thread);
850 else
851 thread_add_fd (m->write, thread);
718e3744 852
853 return thread;
854}
855
98c91ac6 856static struct thread *
857funcname_thread_add_timer_timeval (struct thread_master *m,
858 int (*func) (struct thread *),
a48b4e6d 859 int type,
98c91ac6 860 void *arg,
9c7753e4
DL
861 struct timeval *time_relative,
862 debugargdef)
718e3744 863{
718e3744 864 struct thread *thread;
4becea72 865 struct pqueue *queue;
db9c0df9 866 struct timeval alarm_time;
718e3744 867
868 assert (m != NULL);
869
8b70d0b0 870 assert (type == THREAD_TIMER || type == THREAD_BACKGROUND);
a48b4e6d 871 assert (time_relative);
872
4becea72 873 queue = ((type == THREAD_TIMER) ? m->timer : m->background);
9c7753e4 874 thread = thread_get (m, type, func, arg, debugargpass);
718e3744 875
876 /* Do we need jitter here? */
b8192765 877 quagga_get_relative (NULL);
db9c0df9
PJ
878 alarm_time.tv_sec = relative_time.tv_sec + time_relative->tv_sec;
879 alarm_time.tv_usec = relative_time.tv_usec + time_relative->tv_usec;
8b70d0b0 880 thread->u.sands = timeval_adjust(alarm_time);
718e3744 881
4becea72 882 pqueue_enqueue(thread, queue);
9e867fe6 883 return thread;
884}
885
98c91ac6 886
887/* Add timer event thread. */
9e867fe6 888struct thread *
98c91ac6 889funcname_thread_add_timer (struct thread_master *m,
890 int (*func) (struct thread *),
9c7753e4
DL
891 void *arg, long timer,
892 debugargdef)
9e867fe6 893{
98c91ac6 894 struct timeval trel;
9e867fe6 895
896 assert (m != NULL);
897
9076fbd3 898 trel.tv_sec = timer;
98c91ac6 899 trel.tv_usec = 0;
9e867fe6 900
a48b4e6d 901 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg,
9c7753e4 902 &trel, debugargpass);
98c91ac6 903}
9e867fe6 904
98c91ac6 905/* Add timer event thread with "millisecond" resolution */
906struct thread *
907funcname_thread_add_timer_msec (struct thread_master *m,
908 int (*func) (struct thread *),
9c7753e4
DL
909 void *arg, long timer,
910 debugargdef)
98c91ac6 911{
912 struct timeval trel;
9e867fe6 913
98c91ac6 914 assert (m != NULL);
718e3744 915
af04bd7c 916 trel.tv_sec = timer / 1000;
917 trel.tv_usec = 1000*(timer % 1000);
98c91ac6 918
a48b4e6d 919 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
9c7753e4 920 arg, &trel, debugargpass);
a48b4e6d 921}
922
d03c4cbd
DL
923/* Add timer event thread with "millisecond" resolution */
924struct thread *
925funcname_thread_add_timer_tv (struct thread_master *m,
926 int (*func) (struct thread *),
927 void *arg, struct timeval *tv,
928 debugargdef)
929{
930 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
931 arg, tv, debugargpass);
932}
933
a48b4e6d 934/* Add a background thread, with an optional millisec delay */
935struct thread *
936funcname_thread_add_background (struct thread_master *m,
937 int (*func) (struct thread *),
9c7753e4
DL
938 void *arg, long delay,
939 debugargdef)
a48b4e6d 940{
941 struct timeval trel;
942
943 assert (m != NULL);
944
945 if (delay)
946 {
947 trel.tv_sec = delay / 1000;
948 trel.tv_usec = 1000*(delay % 1000);
949 }
950 else
951 {
952 trel.tv_sec = 0;
953 trel.tv_usec = 0;
954 }
955
956 return funcname_thread_add_timer_timeval (m, func, THREAD_BACKGROUND,
9c7753e4 957 arg, &trel, debugargpass);
718e3744 958}
959
960/* Add simple event thread. */
961struct thread *
e04ab74d 962funcname_thread_add_event (struct thread_master *m,
9c7753e4
DL
963 int (*func) (struct thread *), void *arg, int val,
964 debugargdef)
718e3744 965{
966 struct thread *thread;
967
968 assert (m != NULL);
969
9c7753e4 970 thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
718e3744 971 thread->u.val = val;
972 thread_list_add (&m->event, thread);
973
974 return thread;
975}
976
/* Drop interest in a cancelled read/write thread's fd.  On the poll
   backend, clear the given event bits and compact the pollfd array
   when no events remain; on the select backend just clear the bit. */
static void
thread_cancel_read_or_write (struct thread *thread, short int state)
{
#if defined(HAVE_POLL)
  nfds_t i;

  for (i = 0; i < thread->master->handler.pfdcount; ++i)
    if (thread->master->handler.pfds[i].fd == thread->u.fd)
      {
        thread->master->handler.pfds[i].events &= ~(state);

        /* remove thread fds from pfd list */
        if (thread->master->handler.pfds[i].events == 0)
          {
            memmove(thread->master->handler.pfds+i,
                    thread->master->handler.pfds+i+1,
                    (thread->master->handler.pfdsize-i-1) * sizeof(struct pollfd));
            thread->master->handler.pfdcount--;
            return;
          }
      }
#endif

  fd_clear_read_write (thread);
}
1002
718e3744 1003/* Cancel thread from scheduler. */
1004void
1005thread_cancel (struct thread *thread)
1006{
4becea72
CF
1007 struct thread_list *list = NULL;
1008 struct pqueue *queue = NULL;
308d14ae 1009 struct thread **thread_array = NULL;
a48b4e6d 1010
718e3744 1011 switch (thread->type)
1012 {
1013 case THREAD_READ:
1bba4c93
DS
1014#if defined (HAVE_POLL)
1015 thread_cancel_read_or_write (thread, POLLIN | POLLHUP);
1016#else
1017 thread_cancel_read_or_write (thread, 0);
1018#endif
308d14ae 1019 thread_array = thread->master->read;
718e3744 1020 break;
1021 case THREAD_WRITE:
1bba4c93
DS
1022#if defined (HAVE_POLL)
1023 thread_cancel_read_or_write (thread, POLLOUT | POLLHUP);
1024#else
1025 thread_cancel_read_or_write (thread, 0);
1026#endif
308d14ae 1027 thread_array = thread->master->write;
718e3744 1028 break;
1029 case THREAD_TIMER:
4becea72 1030 queue = thread->master->timer;
718e3744 1031 break;
1032 case THREAD_EVENT:
a48b4e6d 1033 list = &thread->master->event;
718e3744 1034 break;
1035 case THREAD_READY:
a48b4e6d 1036 list = &thread->master->ready;
718e3744 1037 break;
a48b4e6d 1038 case THREAD_BACKGROUND:
4becea72 1039 queue = thread->master->background;
8b70d0b0 1040 break;
718e3744 1041 default:
a48b4e6d 1042 return;
718e3744 1043 break;
1044 }
4becea72
CF
1045
1046 if (queue)
1047 {
1048 assert(thread->index >= 0);
1049 assert(thread == queue->array[thread->index]);
1050 pqueue_remove_at(thread->index, queue);
1051 }
1052 else if (list)
1053 {
1054 thread_list_delete (list, thread);
1055 }
308d14ae
DV
1056 else if (thread_array)
1057 {
1058 thread_delete_fd (thread_array, thread);
1059 }
4becea72
CF
1060 else
1061 {
308d14ae 1062 assert(!"Thread should be either in queue or list or array!");
4becea72
CF
1063 }
1064
718e3744 1065 thread->type = THREAD_UNUSED;
1066 thread_add_unuse (thread->master, thread);
1067}
1068
1069/* Delete all events which has argument value arg. */
dc81807a 1070unsigned int
718e3744 1071thread_cancel_event (struct thread_master *m, void *arg)
1072{
dc81807a 1073 unsigned int ret = 0;
718e3744 1074 struct thread *thread;
1075
1076 thread = m->event.head;
1077 while (thread)
1078 {
1079 struct thread *t;
1080
1081 t = thread;
1082 thread = t->next;
1083
1084 if (t->arg == arg)
a48b4e6d 1085 {
dc81807a 1086 ret++;
a48b4e6d 1087 thread_list_delete (&m->event, t);
1088 t->type = THREAD_UNUSED;
1089 thread_add_unuse (m, t);
1090 }
718e3744 1091 }
1b79fcb6
JBD
1092
1093 /* thread can be on the ready list too */
1094 thread = m->ready.head;
1095 while (thread)
1096 {
1097 struct thread *t;
1098
1099 t = thread;
1100 thread = t->next;
1101
1102 if (t->arg == arg)
1103 {
1104 ret++;
1105 thread_list_delete (&m->ready, t);
1106 t->type = THREAD_UNUSED;
1107 thread_add_unuse (m, t);
1108 }
1109 }
dc81807a 1110 return ret;
718e3744 1111}
1112
a48b4e6d 1113static struct timeval *
4becea72 1114thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
718e3744 1115{
4becea72 1116 if (queue->size)
718e3744 1117 {
4becea72
CF
1118 struct thread *next_timer = queue->array[0];
1119 *timer_val = timeval_subtract (next_timer->u.sands, relative_time);
718e3744 1120 return timer_val;
1121 }
1122 return NULL;
1123}
718e3744 1124
8cc4198f 1125static struct thread *
718e3744 1126thread_run (struct thread_master *m, struct thread *thread,
1127 struct thread *fetch)
1128{
1129 *fetch = *thread;
1130 thread->type = THREAD_UNUSED;
1131 thread_add_unuse (m, thread);
1132 return fetch;
1133}
1134
a48b4e6d 1135static int
0a95a0d0 1136thread_process_fds_helper (struct thread_master *m, struct thread *thread, thread_fd_set *fdset, short int state, int pos)
5d4ccd4e 1137{
5d4ccd4e
DS
1138 struct thread **thread_array;
1139
1140 if (!thread)
1141 return 0;
1142
1143 if (thread->type == THREAD_READ)
0a95a0d0 1144 thread_array = m->read;
5d4ccd4e 1145 else
0a95a0d0 1146 thread_array = m->write;
5d4ccd4e 1147
0a95a0d0 1148 if (fd_is_set (thread, fdset, pos))
5d4ccd4e 1149 {
0a95a0d0 1150 fd_clear_read_write (thread);
5d4ccd4e
DS
1151 thread_delete_fd (thread_array, thread);
1152 thread_list_add (&m->ready, thread);
1153 thread->type = THREAD_READY;
0a95a0d0
DS
1154#if defined(HAVE_POLL)
1155 thread->master->handler.pfds[pos].events &= ~(state);
1156#endif
5d4ccd4e
DS
1157 return 1;
1158 }
1159 return 0;
1160}
1161
0a95a0d0
DS
#if defined(HAVE_POLL)

#if defined(HAVE_SNMP)
/* Append the SNMP fds (given as an fd_set) to the poll set, after the
   regular fds.  pfdcountsnmp marks where the SNMP entries start. */
static void
add_snmp_pollfds(struct thread_master *m, fd_set *snmpfds, int fdsetsize)
{
  int i;
  m->handler.pfdcountsnmp = m->handler.pfdcount;
  /* cycle through fds and add necessary fds to poll set */
  for (i = 0; i < fdsetsize; ++i)
    {
      if (FD_ISSET(i, snmpfds))
        {
          /* BUGFIX: the bound check used <=, which still permitted a
             write one element past the end of pfds when the array was
             exactly full; the index being written must be strictly
             below pfdsize. */
          assert (m->handler.pfdcountsnmp < m->handler.pfdsize);

          m->handler.pfds[m->handler.pfdcountsnmp].fd = i;
          m->handler.pfds[m->handler.pfdcountsnmp].events = POLLIN;
          m->handler.pfdcountsnmp++;
        }
    }
}
#endif

/* Dispatch poll results: make ready every thread whose fd reported an
   event, and drop invalid/hung-up fds from the poll set.  Stops early
   once `num` fds with events have been handled. */
static void
check_pollfds(struct thread_master *m, fd_set *readfd, int num)
{
  nfds_t i = 0;
  int ready = 0;
  for (i = 0; i < m->handler.pfdcount && ready < num; ++i)
    {
      /* no event for current fd? immediately continue */
      if (m->handler.pfds[i].revents == 0)
        continue;

      ready++;

      /* POLLIN / POLLOUT process event */
      if (m->handler.pfds[i].revents & POLLIN)
        thread_process_fds_helper(m, m->read[m->handler.pfds[i].fd], NULL, POLLIN, i);
      if (m->handler.pfds[i].revents & POLLOUT)
        thread_process_fds_helper(m, m->write[m->handler.pfds[i].fd], NULL, POLLOUT, i);

      /* remove fd from list on POLLNVAL */
      if (m->handler.pfds[i].revents & POLLNVAL ||
          m->handler.pfds[i].revents & POLLHUP)
        {
          memmove(m->handler.pfds+i,
                  m->handler.pfds+i+1,
                  (m->handler.pfdsize-i-1) * sizeof(struct pollfd));
          m->handler.pfdcount--;
          i--;   /* re-examine the entry shifted into slot i */
        }
      else
        m->handler.pfds[i].revents = 0;
    }
}
#endif
1221
1222static void
5d4ccd4e 1223thread_process_fds (struct thread_master *m, thread_fd_set *rset, thread_fd_set *wset, int num)
718e3744 1224{
0a95a0d0
DS
1225#if defined (HAVE_POLL)
1226 check_pollfds (m, rset, num);
1227#else
308d14ae
DV
1228 int ready = 0, index;
1229
5d4ccd4e 1230 for (index = 0; index < m->fd_limit && ready < num; ++index)
718e3744 1231 {
0a95a0d0
DS
1232 ready += thread_process_fds_helper (m, m->read[index], rset, 0, 0);
1233 ready += thread_process_fds_helper (m, m->write[index], wset, 0, 0);
718e3744 1234 }
0a95a0d0 1235#endif
718e3744 1236}
1237
8b70d0b0 1238/* Add all timers that have popped to the ready list. */
a48b4e6d 1239static unsigned int
4becea72 1240thread_timer_process (struct pqueue *queue, struct timeval *timenow)
a48b4e6d 1241{
1242 struct thread *thread;
1243 unsigned int ready = 0;
1244
4becea72 1245 while (queue->size)
8b70d0b0 1246 {
4becea72 1247 thread = queue->array[0];
8b70d0b0 1248 if (timeval_cmp (*timenow, thread->u.sands) < 0)
1249 return ready;
4becea72 1250 pqueue_dequeue(queue);
8b70d0b0 1251 thread->type = THREAD_READY;
1252 thread_list_add (&thread->master->ready, thread);
1253 ready++;
1254 }
a48b4e6d 1255 return ready;
1256}
1257
2613abe6
PJ
1258/* process a list en masse, e.g. for event thread lists */
1259static unsigned int
1260thread_process (struct thread_list *list)
1261{
1262 struct thread *thread;
b5043aab 1263 struct thread *next;
2613abe6
PJ
1264 unsigned int ready = 0;
1265
b5043aab 1266 for (thread = list->head; thread; thread = next)
2613abe6 1267 {
b5043aab 1268 next = thread->next;
2613abe6
PJ
1269 thread_list_delete (list, thread);
1270 thread->type = THREAD_READY;
1271 thread_list_add (&thread->master->ready, thread);
1272 ready++;
1273 }
1274 return ready;
1275}
1276
1277
/* Fetch next ready thread.  This is the scheduler's core loop: it
 * blocks in fd_select() until something is runnable, then returns one
 * thread per call (copied into *fetch via thread_run).  Priority order
 * per quantum: pending signals, already-ready threads, events,
 * foreground timers, I/O, background timers. */
struct thread *
thread_fetch (struct thread_master *m, struct thread *fetch)
{
  struct thread *thread;
  thread_fd_set readfd;
  thread_fd_set writefd;
  thread_fd_set exceptfd;
  struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
  struct timeval timer_val_bg;
  struct timeval *timer_wait = &timer_val;
  struct timeval *timer_wait_bg;

  while (1)
    {
      int num = 0;

      /* Signals pre-empt everything */
      quagga_sigevent_process ();

      /* Drain the ready queue of already scheduled jobs, before scheduling
       * more.
       */
      if ((thread = thread_trim_head (&m->ready)) != NULL)
        return thread_run (m, thread, fetch);

      /* To be fair to all kinds of threads, and avoid starvation, we
       * need to be careful to consider all thread types for scheduling
       * in each quanta. I.e. we should not return early from here on.
       */

      /* Normal event are the next highest priority. */
      thread_process (&m->event);

      /* Structure copy.  select() mutates its fd_sets, so work on
       * copies of the master sets each iteration (poll() keeps its own
       * pollfd array and needs no copy). */
#if !defined(HAVE_POLL)
      readfd = fd_copy_fd_set(m->handler.readfd);
      writefd = fd_copy_fd_set(m->handler.writefd);
      exceptfd = fd_copy_fd_set(m->handler.exceptfd);
#endif

      /* Calculate select wait timer if nothing else to do.  If work is
       * already queued, timer_wait stays at {0,0} so the select below
       * only polls. */
      if (m->ready.count == 0)
        {
          quagga_get_relative (NULL);
          timer_wait = thread_timer_wait (m->timer, &timer_val);
          timer_wait_bg = thread_timer_wait (m->background, &timer_val_bg);

          /* Sleep until whichever timer class pops first. */
          if (timer_wait_bg &&
              (!timer_wait || (timeval_cmp (*timer_wait, *timer_wait_bg) > 0)))
            timer_wait = timer_wait_bg;
        }

      /* NOTE(review): exceptfd is passed to fd_select but no exception
       * threads are dispatched below — presumably unused; confirm. */
      num = fd_select (m, FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);

      /* Signals should get quick treatment */
      if (num < 0)
        {
          if (errno == EINTR)
            continue; /* signal received - process it */
          zlog_warn ("select() error: %s", safe_strerror (errno));
          return NULL;
        }

      /* Check foreground timers. Historically, they have had higher
         priority than I/O threads, so let's push them onto the ready
         list in front of the I/O threads. */
      quagga_get_relative (NULL);
      thread_timer_process (m->timer, &relative_time);

      /* Got IO, process it */
      if (num > 0)
        thread_process_fds (m, &readfd, &writefd, num);

#if 0
      /* If any threads were made ready above (I/O or foreground timer),
         perhaps we should avoid adding background timers to the ready
         list at this time. If this is code is uncommented, then background
         timer threads will not run unless there is nothing else to do. */
      if ((thread = thread_trim_head (&m->ready)) != NULL)
        return thread_run (m, thread, fetch);
#endif

      /* Background timer/events, lowest priority */
      thread_timer_process (m->background, &relative_time);

      if ((thread = thread_trim_head (&m->ready)) != NULL)
        return thread_run (m, thread, fetch);
    }
}
1368
/* Return the wall-clock time elapsed between *start and *now (as
 * reported by timeval_elapsed), and store the consumed CPU time
 * through *cputime.  Without getrusage() support, CPU time is
 * reported as 0. */
unsigned long
thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
{
#ifdef HAVE_RUSAGE
  /* This is 'user + sys' time. */
  *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
	     timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
#else
  /* No getrusage() on this platform: CPU time is unavailable. */
  *cputime = 0;
#endif /* HAVE_RUSAGE */
  return timeval_elapsed (now->real, start->real);
}
1381
50596be0
DS
1382/* We should aim to yield after yield milliseconds, which defaults
1383 to THREAD_YIELD_TIME_SLOT .
8b70d0b0 1384 Note: we are using real (wall clock) time for this calculation.
1385 It could be argued that CPU time may make more sense in certain
1386 contexts. The things to consider are whether the thread may have
1387 blocked (in which case wall time increases, but CPU time does not),
1388 or whether the system is heavily loaded with other processes competing
1389 for CPU time. On balance, wall clock time seems to make sense.
1390 Plus it has the added benefit that gettimeofday should be faster
1391 than calling getrusage. */
718e3744 1392int
1393thread_should_yield (struct thread *thread)
1394{
db9c0df9 1395 quagga_get_relative (NULL);
41af338e 1396 return (timeval_elapsed(relative_time, thread->real) >
50596be0
DS
1397 thread->yield);
1398}
1399
/* Set the wall-clock budget (in the units used by thread_should_yield)
 * after which the thread should voluntarily yield. */
void
thread_set_yield_time (struct thread *thread, unsigned long yield_time)
{
  thread->yield = yield_time;
}
1405
/* Snapshot resource usage into *r: the relative wall clock always, and
 * process CPU usage when getrusage() is available. */
void
thread_getrusage (RUSAGE_T *r)
{
  quagga_get_relative (NULL);
#ifdef HAVE_RUSAGE
  getrusage(RUSAGE_SELF, &(r->cpu));
#endif
  r->real = relative_time;

#ifdef HAVE_CLOCK_MONOTONIC
  /* quagga_get_relative() only updates recent_time if gettimeofday
   * based, not when using CLOCK_MONOTONIC. As we export recent_time
   * and guarantee to update it before threads are run...
   */
  quagga_gettimeofday(&recent_time);
#endif /* HAVE_CLOCK_MONOTONIC */
}
1423
/* The thread currently being executed by thread_call(), or NULL when
 * none is running. */
struct thread *thread_current = NULL;

/* We check thread consumed time. If the system has getrusage, we'll
   use that to get in-depth stats on the performance of the thread in addition
   to wall clock time stats from gettimeofday. */
void
thread_call (struct thread *thread)
{
  unsigned long realtime, cputime;
  RUSAGE_T before, after;

  /* Cache a pointer to the relevant cpu history thread, if the thread
   * does not have it yet.
   *
   * Callers submitting 'dummy threads' hence must take care that
   * thread->hist is NULL (note: this refers to the hist field checked
   * below)
   */
  if (!thread->hist)
    {
      struct cpu_thread_history tmp;

      tmp.func = thread->func;
      tmp.funcname = thread->funcname;

      /* hash_get with the alloc callback inserts a new history record
       * on first lookup for this (func, funcname) pair. */
      thread->hist = hash_get (cpu_record, &tmp,
			       (void * (*) (void *))cpu_record_hash_alloc);
    }

  /* Measure usage around the callback invocation. */
  GETRUSAGE (&before);
  thread->real = before.real;

  thread_current = thread;
  (*thread->func) (thread);
  thread_current = NULL;

  GETRUSAGE (&after);

  /* Accumulate per-function totals and maxima. */
  realtime = thread_consumed_time (&after, &before, &cputime);
  thread->hist->real.total += realtime;
  if (thread->hist->real.max < realtime)
    thread->hist->real.max = realtime;
#ifdef HAVE_RUSAGE
  thread->hist->cpu.total += cputime;
  if (thread->hist->cpu.max < cputime)
    thread->hist->cpu.max = cputime;
#endif

  ++(thread->hist->total_calls);
  thread->hist->types |= (1 << thread->add_type);

#ifdef CONSUMED_TIME_CHECK
  if (realtime > CONSUMED_TIME_CHECK)
    {
      /*
       * We have a CPU Hog on our hands.
       * Whinge about it now, so we're aware this is yet another task
       * to fix.
       */
      zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
		 thread->funcname,
		 (unsigned long) thread->func,
		 realtime/1000, cputime/1000);
    }
#endif /* CONSUMED_TIME_CHECK */
}
1489
1490/* Execute thread */
1491struct thread *
e04ab74d 1492funcname_thread_execute (struct thread_master *m,
718e3744 1493 int (*func)(struct thread *),
1494 void *arg,
e04ab74d 1495 int val,
9c7753e4 1496 debugargdef)
718e3744 1497{
1498 struct thread dummy;
1499
1500 memset (&dummy, 0, sizeof (struct thread));
1501
1502 dummy.type = THREAD_EVENT;
e04ab74d 1503 dummy.add_type = THREAD_EXECUTE;
718e3744 1504 dummy.master = NULL;
1505 dummy.func = func;
1506 dummy.arg = arg;
1507 dummy.u.val = val;
9c7753e4
DL
1508
1509 dummy.funcname = funcname;
1510 dummy.schedfrom = schedfrom;
1511 dummy.schedfrom_line = fromln;
1512
718e3744 1513 thread_call (&dummy);
1514
1515 return NULL;
1516}