1 /* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
4 * This file is part of GNU Zebra.
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
33 /* Recent absolute time of day */
34 struct timeval recent_time
;
35 static struct timeval last_recent_time
;
36 /* Relative time, since startup */
37 static struct timeval relative_time
;
38 static struct timeval relative_time_base
;
40 static unsigned short timers_inited
;
42 static struct hash
*cpu_record
= NULL
;
/* Struct timeval's tv_usec one second value.  */
#define TIMER_SECOND_MICRO 1000000L

/* Adjust so that tv_usec is in the range [0,TIMER_SECOND_MICRO).
   And change negative values to 0. */
static struct timeval
timeval_adjust (struct timeval a)
{
  /* Carry whole seconds out of the microsecond field. */
  while (a.tv_usec >= TIMER_SECOND_MICRO)
    {
      a.tv_usec -= TIMER_SECOND_MICRO;
      a.tv_sec++;
    }

  /* Borrow seconds into a negative microsecond field. */
  while (a.tv_usec < 0)
    {
      a.tv_usec += TIMER_SECOND_MICRO;
      a.tv_sec--;
    }

  if (a.tv_sec < 0)
    /* Change negative timeouts to 0. */
    a.tv_sec = a.tv_usec = 0;

  return a;
}
72 timeval_subtract (struct timeval a
, struct timeval b
)
76 ret
.tv_usec
= a
.tv_usec
- b
.tv_usec
;
77 ret
.tv_sec
= a
.tv_sec
- b
.tv_sec
;
79 return timeval_adjust (ret
);
/* Three-way compare of two timevals: negative if a < b, zero if equal,
 * positive if a > b.  Assumes both are normalised (tv_usec in range). */
static long
timeval_cmp (struct timeval a, struct timeval b)
{
  return (a.tv_sec == b.tv_sec
	  ? a.tv_usec - b.tv_usec : a.tv_sec - b.tv_sec);
}
90 timeval_elapsed (struct timeval a
, struct timeval b
)
92 return (((a
.tv_sec
- b
.tv_sec
) * TIMER_SECOND_MICRO
)
93 + (a
.tv_usec
- b
.tv_usec
));
96 #ifndef HAVE_CLOCK_MONOTONIC
98 quagga_gettimeofday_relative_adjust (void)
101 if (timeval_cmp (recent_time
, last_recent_time
) < 0)
103 relative_time
.tv_sec
++;
104 relative_time
.tv_usec
= 0;
108 diff
= timeval_subtract (recent_time
, last_recent_time
);
109 relative_time
.tv_sec
+= diff
.tv_sec
;
110 relative_time
.tv_usec
+= diff
.tv_usec
;
111 relative_time
= timeval_adjust (relative_time
);
113 last_recent_time
= recent_time
;
115 #endif /* !HAVE_CLOCK_MONOTONIC */
117 /* gettimeofday wrapper, to keep recent_time updated */
119 quagga_gettimeofday (struct timeval
*tv
)
125 if (!(ret
= gettimeofday (&recent_time
, NULL
)))
130 relative_time_base
= last_recent_time
= recent_time
;
133 /* avoid copy if user passed recent_time pointer.. */
134 if (tv
!= &recent_time
)
142 quagga_get_relative (struct timeval
*tv
)
146 #ifdef HAVE_CLOCK_MONOTONIC
149 if (!(ret
= clock_gettime (CLOCK_MONOTONIC
, &tp
)))
151 relative_time
.tv_sec
= tp
.tv_sec
;
152 relative_time
.tv_usec
= tp
.tv_nsec
/ 1000;
155 #else /* !HAVE_CLOCK_MONOTONIC */
156 if (!(ret
= quagga_gettimeofday (&recent_time
)))
157 quagga_gettimeofday_relative_adjust();
158 #endif /* HAVE_CLOCK_MONOTONIC */
166 /* Get absolute time stamp, but in terms of the internal timer
167 * Could be wrong, but at least won't go back.
170 quagga_real_stabilised (struct timeval
*tv
)
172 *tv
= relative_time_base
;
173 tv
->tv_sec
+= relative_time
.tv_sec
;
174 tv
->tv_usec
+= relative_time
.tv_usec
;
175 *tv
= timeval_adjust (*tv
);
178 /* Exported Quagga timestamp function.
179 * Modelled on POSIX clock_gettime.
182 quagga_gettime (enum quagga_clkid clkid
, struct timeval
*tv
)
186 case QUAGGA_CLK_REALTIME
:
187 return quagga_gettimeofday (tv
);
188 case QUAGGA_CLK_MONOTONIC
:
189 return quagga_get_relative (tv
);
190 case QUAGGA_CLK_REALTIME_STABILISED
:
191 quagga_real_stabilised (tv
);
/* time_t value in terms of stabilised absolute time.
 * replacement for POSIX time() */
time_t
quagga_time (time_t *t)
{
  struct timeval tv;

  quagga_real_stabilised (&tv);
  if (t)
    *t = tv.tv_sec;
  return tv.tv_sec;
}
212 /* Public export of recent_relative_time by value */
214 recent_relative_time (void)
216 return relative_time
;
220 cpu_record_hash_key (struct cpu_thread_history
*a
)
222 return (uintptr_t) a
->func
;
226 cpu_record_hash_cmp (const struct cpu_thread_history
*a
,
227 const struct cpu_thread_history
*b
)
229 return a
->func
== b
->func
;
233 cpu_record_hash_alloc (struct cpu_thread_history
*a
)
235 struct cpu_thread_history
*new;
236 new = XCALLOC (MTYPE_THREAD_STATS
, sizeof (struct cpu_thread_history
));
238 new->funcname
= XSTRDUP(MTYPE_THREAD_FUNCNAME
, a
->funcname
);
243 cpu_record_hash_free (void *a
)
245 struct cpu_thread_history
*hist
= a
;
247 XFREE (MTYPE_THREAD_FUNCNAME
, hist
->funcname
);
248 XFREE (MTYPE_THREAD_STATS
, hist
);
252 vty_out_cpu_thread_history(struct vty
* vty
,
253 struct cpu_thread_history
*a
)
256 vty_out(vty
, "%7ld.%03ld %9d %8ld %9ld %8ld %9ld",
257 a
->cpu
.total
/1000, a
->cpu
.total
%1000, a
->total_calls
,
258 a
->cpu
.total
/a
->total_calls
, a
->cpu
.max
,
259 a
->real
.total
/a
->total_calls
, a
->real
.max
);
261 vty_out(vty
, "%7ld.%03ld %9d %8ld %9ld",
262 a
->real
.total
/1000, a
->real
.total
%1000, a
->total_calls
,
263 a
->real
.total
/a
->total_calls
, a
->real
.max
);
265 vty_out(vty
, " %c%c%c%c%c%c %s%s",
266 a
->types
& (1 << THREAD_READ
) ? 'R':' ',
267 a
->types
& (1 << THREAD_WRITE
) ? 'W':' ',
268 a
->types
& (1 << THREAD_TIMER
) ? 'T':' ',
269 a
->types
& (1 << THREAD_EVENT
) ? 'E':' ',
270 a
->types
& (1 << THREAD_EXECUTE
) ? 'X':' ',
271 a
->types
& (1 << THREAD_BACKGROUND
) ? 'B' : ' ',
272 a
->funcname
, VTY_NEWLINE
);
276 cpu_record_hash_print(struct hash_backet
*bucket
,
279 struct cpu_thread_history
*totals
= args
[0];
280 struct vty
*vty
= args
[1];
281 thread_type
*filter
= args
[2];
282 struct cpu_thread_history
*a
= bucket
->data
;
285 if ( !(a
->types
& *filter
) )
287 vty_out_cpu_thread_history(vty
,a
);
288 totals
->total_calls
+= a
->total_calls
;
289 totals
->real
.total
+= a
->real
.total
;
290 if (totals
->real
.max
< a
->real
.max
)
291 totals
->real
.max
= a
->real
.max
;
293 totals
->cpu
.total
+= a
->cpu
.total
;
294 if (totals
->cpu
.max
< a
->cpu
.max
)
295 totals
->cpu
.max
= a
->cpu
.max
;
300 cpu_record_print(struct vty
*vty
, thread_type filter
)
302 struct cpu_thread_history tmp
;
303 void *args
[3] = {&tmp
, vty
, &filter
};
305 memset(&tmp
, 0, sizeof tmp
);
306 tmp
.funcname
= (char *)"TOTAL";
310 vty_out(vty
, "%21s %18s %18s%s",
311 "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE
);
313 vty_out(vty
, "Runtime(ms) Invoked Avg uSec Max uSecs");
315 vty_out(vty
, " Avg uSec Max uSecs");
317 vty_out(vty
, " Type Thread%s", VTY_NEWLINE
);
318 hash_iterate(cpu_record
,
319 (void(*)(struct hash_backet
*,void*))cpu_record_hash_print
,
322 if (tmp
.total_calls
> 0)
323 vty_out_cpu_thread_history(vty
, &tmp
);
326 DEFUN(show_thread_cpu
,
328 "show thread cpu [FILTER]",
330 "Thread information\n"
332 "Display filter (rwtexb)\n")
335 thread_type filter
= (thread_type
) -1U;
340 while (argv
[0][i
] != '\0')
342 switch ( argv
[0][i
] )
346 filter
|= (1 << THREAD_READ
);
350 filter
|= (1 << THREAD_WRITE
);
354 filter
|= (1 << THREAD_TIMER
);
358 filter
|= (1 << THREAD_EVENT
);
362 filter
|= (1 << THREAD_EXECUTE
);
366 filter
|= (1 << THREAD_BACKGROUND
);
375 vty_out(vty
, "Invalid filter \"%s\" specified,"
376 " must contain at least one of 'RWTEXB'%s",
377 argv
[0], VTY_NEWLINE
);
382 cpu_record_print(vty
, filter
);
387 cpu_record_hash_clear (struct hash_backet
*bucket
,
390 thread_type
*filter
= args
;
391 struct cpu_thread_history
*a
= bucket
->data
;
394 if ( !(a
->types
& *filter
) )
397 hash_release (cpu_record
, bucket
->data
);
401 cpu_record_clear (thread_type filter
)
403 thread_type
*tmp
= &filter
;
404 hash_iterate (cpu_record
,
405 (void (*) (struct hash_backet
*,void*)) cpu_record_hash_clear
,
409 DEFUN(clear_thread_cpu
,
410 clear_thread_cpu_cmd
,
411 "clear thread cpu [FILTER]",
412 "Clear stored data\n"
413 "Thread information\n"
415 "Display filter (rwtexb)\n")
418 thread_type filter
= (thread_type
) -1U;
423 while (argv
[0][i
] != '\0')
425 switch ( argv
[0][i
] )
429 filter
|= (1 << THREAD_READ
);
433 filter
|= (1 << THREAD_WRITE
);
437 filter
|= (1 << THREAD_TIMER
);
441 filter
|= (1 << THREAD_EVENT
);
445 filter
|= (1 << THREAD_EXECUTE
);
449 filter
|= (1 << THREAD_BACKGROUND
);
458 vty_out(vty
, "Invalid filter \"%s\" specified,"
459 " must contain at least one of 'RWTEXB'%s",
460 argv
[0], VTY_NEWLINE
);
465 cpu_record_clear (filter
);
469 /* List allocation and head/tail print out. */
471 thread_list_debug (struct thread_list
*list
)
473 printf ("count [%d] head [%p] tail [%p]\n",
474 list
->count
, list
->head
, list
->tail
);
477 /* Debug print for thread_master. */
478 static void __attribute__ ((unused
))
479 thread_master_debug (struct thread_master
*m
)
481 printf ("-----------\n");
482 printf ("readlist : ");
483 thread_list_debug (&m
->read
);
484 printf ("writelist : ");
485 thread_list_debug (&m
->write
);
486 printf ("timerlist : ");
487 thread_list_debug (&m
->timer
);
488 printf ("eventlist : ");
489 thread_list_debug (&m
->event
);
490 printf ("unuselist : ");
491 thread_list_debug (&m
->unuse
);
492 printf ("bgndlist : ");
493 thread_list_debug (&m
->background
);
494 printf ("total alloc: [%ld]\n", m
->alloc
);
495 printf ("-----------\n");
498 /* Allocate new thread master. */
499 struct thread_master
*
500 thread_master_create ()
502 if (cpu_record
== NULL
)
504 = hash_create_size (1011, (unsigned int (*) (void *))cpu_record_hash_key
,
505 (int (*) (const void *, const void *))cpu_record_hash_cmp
);
507 return (struct thread_master
*) XCALLOC (MTYPE_THREAD_MASTER
,
508 sizeof (struct thread_master
));
511 /* Add a new thread to the list. */
513 thread_list_add (struct thread_list
*list
, struct thread
*thread
)
516 thread
->prev
= list
->tail
;
518 list
->tail
->next
= thread
;
525 /* Add a new thread just before the point. */
527 thread_list_add_before (struct thread_list
*list
,
528 struct thread
*point
,
529 struct thread
*thread
)
531 thread
->next
= point
;
532 thread
->prev
= point
->prev
;
534 point
->prev
->next
= thread
;
537 point
->prev
= thread
;
541 /* Delete a thread from the list. */
542 static struct thread
*
543 thread_list_delete (struct thread_list
*list
, struct thread
*thread
)
546 thread
->next
->prev
= thread
->prev
;
548 list
->tail
= thread
->prev
;
550 thread
->prev
->next
= thread
->next
;
552 list
->head
= thread
->next
;
553 thread
->next
= thread
->prev
= NULL
;
558 /* Move thread to unuse list. */
560 thread_add_unuse (struct thread_master
*m
, struct thread
*thread
)
562 assert (m
!= NULL
&& thread
!= NULL
);
563 assert (thread
->next
== NULL
);
564 assert (thread
->prev
== NULL
);
565 assert (thread
->type
== THREAD_UNUSED
);
566 thread_list_add (&m
->unuse
, thread
);
567 /* XXX: Should we deallocate funcname here? */
570 /* Free all unused thread. */
572 thread_list_free (struct thread_master
*m
, struct thread_list
*list
)
577 for (t
= list
->head
; t
; t
= next
)
581 XFREE (MTYPE_THREAD_FUNCNAME
, t
->funcname
);
582 XFREE (MTYPE_THREAD
, t
);
588 /* Stop thread scheduler. */
590 thread_master_free (struct thread_master
*m
)
592 thread_list_free (m
, &m
->read
);
593 thread_list_free (m
, &m
->write
);
594 thread_list_free (m
, &m
->timer
);
595 thread_list_free (m
, &m
->event
);
596 thread_list_free (m
, &m
->ready
);
597 thread_list_free (m
, &m
->unuse
);
598 thread_list_free (m
, &m
->background
);
600 XFREE (MTYPE_THREAD_MASTER
, m
);
604 hash_clean (cpu_record
, cpu_record_hash_free
);
605 hash_free (cpu_record
);
610 /* Thread list is empty or not. */
612 thread_empty (struct thread_list
*list
)
614 return list
->head
? 0 : 1;
617 /* Delete top of the list and return it. */
618 static struct thread
*
619 thread_trim_head (struct thread_list
*list
)
621 if (!thread_empty (list
))
622 return thread_list_delete (list
, list
->head
);
626 /* Return remain time in second. */
628 thread_timer_remain_second (struct thread
*thread
)
630 quagga_get_relative (NULL
);
632 if (thread
->u
.sands
.tv_sec
- relative_time
.tv_sec
> 0)
633 return thread
->u
.sands
.tv_sec
- relative_time
.tv_sec
;
638 /* Trim blankspace and "()"s */
640 strip_funcname (const char *funcname
)
643 char tmp
, *ret
, *e
, *b
= buff
;
645 strncpy(buff
, funcname
, sizeof(buff
));
646 buff
[ sizeof(buff
) -1] = '\0';
647 e
= buff
+strlen(buff
) -1;
649 /* Wont work for funcname == "Word (explanation)" */
651 while (*b
== ' ' || *b
== '(')
653 while (*e
== ' ' || *e
== ')')
659 ret
= XSTRDUP (MTYPE_THREAD_FUNCNAME
, b
);
665 /* Get new thread. */
666 static struct thread
*
667 thread_get (struct thread_master
*m
, u_char type
,
668 int (*func
) (struct thread
*), void *arg
, const char* funcname
)
670 struct thread
*thread
;
672 if (!thread_empty (&m
->unuse
))
674 thread
= thread_trim_head (&m
->unuse
);
675 if (thread
->funcname
)
676 XFREE(MTYPE_THREAD_FUNCNAME
, thread
->funcname
);
680 thread
= XCALLOC (MTYPE_THREAD
, sizeof (struct thread
));
684 thread
->add_type
= type
;
689 thread
->funcname
= strip_funcname(funcname
);
694 /* Add new read thread. */
696 funcname_thread_add_read (struct thread_master
*m
,
697 int (*func
) (struct thread
*), void *arg
, int fd
, const char* funcname
)
699 struct thread
*thread
;
703 if (FD_ISSET (fd
, &m
->readfd
))
705 zlog (NULL
, LOG_WARNING
, "There is already read fd [%d]", fd
);
709 thread
= thread_get (m
, THREAD_READ
, func
, arg
, funcname
);
710 FD_SET (fd
, &m
->readfd
);
712 thread_list_add (&m
->read
, thread
);
717 /* Add new write thread. */
719 funcname_thread_add_write (struct thread_master
*m
,
720 int (*func
) (struct thread
*), void *arg
, int fd
, const char* funcname
)
722 struct thread
*thread
;
726 if (FD_ISSET (fd
, &m
->writefd
))
728 zlog (NULL
, LOG_WARNING
, "There is already write fd [%d]", fd
);
732 thread
= thread_get (m
, THREAD_WRITE
, func
, arg
, funcname
);
733 FD_SET (fd
, &m
->writefd
);
735 thread_list_add (&m
->write
, thread
);
740 static struct thread
*
741 funcname_thread_add_timer_timeval (struct thread_master
*m
,
742 int (*func
) (struct thread
*),
745 struct timeval
*time_relative
,
746 const char* funcname
)
748 struct thread
*thread
;
749 struct thread_list
*list
;
750 struct timeval alarm_time
;
755 assert (type
== THREAD_TIMER
|| type
== THREAD_BACKGROUND
);
756 assert (time_relative
);
758 list
= ((type
== THREAD_TIMER
) ? &m
->timer
: &m
->background
);
759 thread
= thread_get (m
, type
, func
, arg
, funcname
);
761 /* Do we need jitter here? */
762 quagga_get_relative (NULL
);
763 alarm_time
.tv_sec
= relative_time
.tv_sec
+ time_relative
->tv_sec
;
764 alarm_time
.tv_usec
= relative_time
.tv_usec
+ time_relative
->tv_usec
;
765 thread
->u
.sands
= timeval_adjust(alarm_time
);
767 /* Sort by timeval. */
768 for (tt
= list
->head
; tt
; tt
= tt
->next
)
769 if (timeval_cmp (thread
->u
.sands
, tt
->u
.sands
) <= 0)
773 thread_list_add_before (list
, tt
, thread
);
775 thread_list_add (list
, thread
);
781 /* Add timer event thread. */
783 funcname_thread_add_timer (struct thread_master
*m
,
784 int (*func
) (struct thread
*),
785 void *arg
, long timer
, const char* funcname
)
794 return funcname_thread_add_timer_timeval (m
, func
, THREAD_TIMER
, arg
,
798 /* Add timer event thread with "millisecond" resolution */
800 funcname_thread_add_timer_msec (struct thread_master
*m
,
801 int (*func
) (struct thread
*),
802 void *arg
, long timer
, const char* funcname
)
808 trel
.tv_sec
= timer
/ 1000;
809 trel
.tv_usec
= 1000*(timer
% 1000);
811 return funcname_thread_add_timer_timeval (m
, func
, THREAD_TIMER
,
812 arg
, &trel
, funcname
);
815 /* Add a background thread, with an optional millisec delay */
817 funcname_thread_add_background (struct thread_master
*m
,
818 int (*func
) (struct thread
*),
819 void *arg
, long delay
,
820 const char *funcname
)
828 trel
.tv_sec
= delay
/ 1000;
829 trel
.tv_usec
= 1000*(delay
% 1000);
837 return funcname_thread_add_timer_timeval (m
, func
, THREAD_BACKGROUND
,
838 arg
, &trel
, funcname
);
841 /* Add simple event thread. */
843 funcname_thread_add_event (struct thread_master
*m
,
844 int (*func
) (struct thread
*), void *arg
, int val
, const char* funcname
)
846 struct thread
*thread
;
850 thread
= thread_get (m
, THREAD_EVENT
, func
, arg
, funcname
);
852 thread_list_add (&m
->event
, thread
);
857 /* Cancel thread from scheduler. */
859 thread_cancel (struct thread
*thread
)
861 struct thread_list
*list
;
863 switch (thread
->type
)
866 assert (FD_ISSET (thread
->u
.fd
, &thread
->master
->readfd
));
867 FD_CLR (thread
->u
.fd
, &thread
->master
->readfd
);
868 list
= &thread
->master
->read
;
871 assert (FD_ISSET (thread
->u
.fd
, &thread
->master
->writefd
));
872 FD_CLR (thread
->u
.fd
, &thread
->master
->writefd
);
873 list
= &thread
->master
->write
;
876 list
= &thread
->master
->timer
;
879 list
= &thread
->master
->event
;
882 list
= &thread
->master
->ready
;
884 case THREAD_BACKGROUND
:
885 list
= &thread
->master
->background
;
891 thread_list_delete (list
, thread
);
892 thread
->type
= THREAD_UNUSED
;
893 thread_add_unuse (thread
->master
, thread
);
896 /* Delete all events which has argument value arg. */
898 thread_cancel_event (struct thread_master
*m
, void *arg
)
900 unsigned int ret
= 0;
901 struct thread
*thread
;
903 thread
= m
->event
.head
;
914 thread_list_delete (&m
->event
, t
);
915 t
->type
= THREAD_UNUSED
;
916 thread_add_unuse (m
, t
);
922 static struct timeval
*
923 thread_timer_wait (struct thread_list
*tlist
, struct timeval
*timer_val
)
925 if (!thread_empty (tlist
))
927 *timer_val
= timeval_subtract (tlist
->head
->u
.sands
, relative_time
);
933 static struct thread
*
934 thread_run (struct thread_master
*m
, struct thread
*thread
,
935 struct thread
*fetch
)
938 thread
->type
= THREAD_UNUSED
;
939 thread
->funcname
= NULL
; /* thread_call will free fetch's copied pointer */
940 thread_add_unuse (m
, thread
);
945 thread_process_fd (struct thread_list
*list
, fd_set
*fdset
, fd_set
*mfdset
)
947 struct thread
*thread
;
953 for (thread
= list
->head
; thread
; thread
= next
)
957 if (FD_ISSET (THREAD_FD (thread
), fdset
))
959 assert (FD_ISSET (THREAD_FD (thread
), mfdset
));
960 FD_CLR(THREAD_FD (thread
), mfdset
);
961 thread_list_delete (list
, thread
);
962 thread_list_add (&thread
->master
->ready
, thread
);
963 thread
->type
= THREAD_READY
;
970 /* Add all timers that have popped to the ready list. */
972 thread_timer_process (struct thread_list
*list
, struct timeval
*timenow
)
974 struct thread
*thread
;
976 unsigned int ready
= 0;
978 for (thread
= list
->head
; thread
; thread
= next
)
981 if (timeval_cmp (*timenow
, thread
->u
.sands
) < 0)
983 thread_list_delete (list
, thread
);
984 thread
->type
= THREAD_READY
;
985 thread_list_add (&thread
->master
->ready
, thread
);
991 /* process a list en masse, e.g. for event thread lists */
993 thread_process (struct thread_list
*list
)
995 struct thread
*thread
;
997 unsigned int ready
= 0;
999 for (thread
= list
->head
; thread
; thread
= next
)
1001 next
= thread
->next
;
1002 thread_list_delete (list
, thread
);
1003 thread
->type
= THREAD_READY
;
1004 thread_list_add (&thread
->master
->ready
, thread
);
1011 /* Fetch next ready thread. */
1013 thread_fetch (struct thread_master
*m
, struct thread
*fetch
)
1015 struct thread
*thread
;
1019 struct timeval timer_val
= { .tv_sec
= 0, .tv_usec
= 0 };
1020 struct timeval timer_val_bg
;
1021 struct timeval
*timer_wait
= &timer_val
;
1022 struct timeval
*timer_wait_bg
;
1028 /* Signals pre-empt everything */
1029 quagga_sigevent_process ();
1031 /* Drain the ready queue of already scheduled jobs, before scheduling
1034 if ((thread
= thread_trim_head (&m
->ready
)) != NULL
)
1035 return thread_run (m
, thread
, fetch
);
1037 /* To be fair to all kinds of threads, and avoid starvation, we
1038 * need to be careful to consider all thread types for scheduling
1039 * in each quanta. I.e. we should not return early from here on.
1042 /* Normal event are the next highest priority. */
1043 thread_process (&m
->event
);
1045 /* Structure copy. */
1047 writefd
= m
->writefd
;
1048 exceptfd
= m
->exceptfd
;
1050 /* Calculate select wait timer if nothing else to do */
1051 if (m
->ready
.count
== 0)
1053 quagga_get_relative (NULL
);
1054 timer_wait
= thread_timer_wait (&m
->timer
, &timer_val
);
1055 timer_wait_bg
= thread_timer_wait (&m
->background
, &timer_val_bg
);
1057 if (timer_wait_bg
&&
1058 (!timer_wait
|| (timeval_cmp (*timer_wait
, *timer_wait_bg
) > 0)))
1059 timer_wait
= timer_wait_bg
;
1062 num
= select (FD_SETSIZE
, &readfd
, &writefd
, &exceptfd
, timer_wait
);
1064 /* Signals should get quick treatment */
1068 continue; /* signal received - process it */
1069 zlog_warn ("select() error: %s", safe_strerror (errno
));
1073 /* Check foreground timers. Historically, they have had higher
1074 priority than I/O threads, so let's push them onto the ready
1075 list in front of the I/O threads. */
1076 quagga_get_relative (NULL
);
1077 thread_timer_process (&m
->timer
, &relative_time
);
1079 /* Got IO, process it */
1082 /* Normal priority read thead. */
1083 thread_process_fd (&m
->read
, &readfd
, &m
->readfd
);
1085 thread_process_fd (&m
->write
, &writefd
, &m
->writefd
);
1089 /* If any threads were made ready above (I/O or foreground timer),
1090 perhaps we should avoid adding background timers to the ready
1091 list at this time. If this is code is uncommented, then background
1092 timer threads will not run unless there is nothing else to do. */
1093 if ((thread
= thread_trim_head (&m
->ready
)) != NULL
)
1094 return thread_run (m
, thread
, fetch
);
1097 /* Background timer/events, lowest priority */
1098 thread_timer_process (&m
->background
, &relative_time
);
1100 if ((thread
= thread_trim_head (&m
->ready
)) != NULL
)
1101 return thread_run (m
, thread
, fetch
);
1106 thread_consumed_time (RUSAGE_T
*now
, RUSAGE_T
*start
, unsigned long *cputime
)
1109 /* This is 'user + sys' time. */
1110 *cputime
= timeval_elapsed (now
->cpu
.ru_utime
, start
->cpu
.ru_utime
) +
1111 timeval_elapsed (now
->cpu
.ru_stime
, start
->cpu
.ru_stime
);
1114 #endif /* HAVE_RUSAGE */
1115 return timeval_elapsed (now
->real
, start
->real
);
1118 /* We should aim to yield after THREAD_YIELD_TIME_SLOT milliseconds.
1119 Note: we are using real (wall clock) time for this calculation.
1120 It could be argued that CPU time may make more sense in certain
1121 contexts. The things to consider are whether the thread may have
1122 blocked (in which case wall time increases, but CPU time does not),
1123 or whether the system is heavily loaded with other processes competing
1124 for CPU time. On balance, wall clock time seems to make sense.
1125 Plus it has the added benefit that gettimeofday should be faster
1126 than calling getrusage. */
1128 thread_should_yield (struct thread
*thread
)
1130 quagga_get_relative (NULL
);
1131 return (timeval_elapsed(relative_time
, thread
->ru
.real
) >
1132 THREAD_YIELD_TIME_SLOT
);
1136 thread_getrusage (RUSAGE_T
*r
)
1138 quagga_get_relative (NULL
);
1140 getrusage(RUSAGE_SELF
, &(r
->cpu
));
1142 r
->real
= relative_time
;
1144 #ifdef HAVE_CLOCK_MONOTONIC
1145 /* quagga_get_relative() only updates recent_time if gettimeofday
1146 * based, not when using CLOCK_MONOTONIC. As we export recent_time
1147 * and guarantee to update it before threads are run...
1149 quagga_gettimeofday(&recent_time
);
1150 #endif /* HAVE_CLOCK_MONOTONIC */
1153 /* We check thread consumed time. If the system has getrusage, we'll
1154 use that to get in-depth stats on the performance of the thread in addition
1155 to wall clock time stats from gettimeofday. */
1157 thread_call (struct thread
*thread
)
1159 unsigned long realtime
, cputime
;
1162 /* Cache a pointer to the relevant cpu history thread, if the thread
1163 * does not have it yet.
1165 * Callers submitting 'dummy threads' hence must take care that
1166 * thread->cpu is NULL
1170 struct cpu_thread_history tmp
;
1172 tmp
.func
= thread
->func
;
1173 tmp
.funcname
= thread
->funcname
;
1175 thread
->hist
= hash_get (cpu_record
, &tmp
,
1176 (void * (*) (void *))cpu_record_hash_alloc
);
1179 GETRUSAGE (&thread
->ru
);
1181 (*thread
->func
) (thread
);
1185 realtime
= thread_consumed_time (&ru
, &thread
->ru
, &cputime
);
1186 thread
->hist
->real
.total
+= realtime
;
1187 if (thread
->hist
->real
.max
< realtime
)
1188 thread
->hist
->real
.max
= realtime
;
1190 thread
->hist
->cpu
.total
+= cputime
;
1191 if (thread
->hist
->cpu
.max
< cputime
)
1192 thread
->hist
->cpu
.max
= cputime
;
1195 ++(thread
->hist
->total_calls
);
1196 thread
->hist
->types
|= (1 << thread
->add_type
);
1198 #ifdef CONSUMED_TIME_CHECK
1199 if (realtime
> CONSUMED_TIME_CHECK
)
1202 * We have a CPU Hog on our hands.
1203 * Whinge about it now, so we're aware this is yet another task
1206 zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
1208 (unsigned long) thread
->func
,
1209 realtime
/1000, cputime
/1000);
1211 #endif /* CONSUMED_TIME_CHECK */
1213 XFREE (MTYPE_THREAD_FUNCNAME
, thread
->funcname
);
1216 /* Execute thread */
1218 funcname_thread_execute (struct thread_master
*m
,
1219 int (*func
)(struct thread
*),
1222 const char* funcname
)
1224 struct thread dummy
;
1226 memset (&dummy
, 0, sizeof (struct thread
));
1228 dummy
.type
= THREAD_EVENT
;
1229 dummy
.add_type
= THREAD_EXECUTE
;
1230 dummy
.master
= NULL
;
1234 dummy
.funcname
= strip_funcname (funcname
);
1235 thread_call (&dummy
);
1237 XFREE (MTYPE_THREAD_FUNCNAME
, dummy
.funcname
);