lib/thread.c
1 /* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 */
21
22 /* #define DEBUG */
23
24 #include <zebra.h>
25 #include <sys/resource.h>
26
27 #include "thread.h"
28 #include "memory.h"
29 #include "log.h"
30 #include "hash.h"
31 #include "pqueue.h"
32 #include "command.h"
33 #include "sigevent.h"
34
35 DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
36 DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
37 DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
38
39 #if defined(__APPLE__)
40 #include <mach/mach.h>
41 #include <mach/mach_time.h>
42 #endif
43
44 static pthread_mutex_t cpu_record_mtx = PTHREAD_MUTEX_INITIALIZER;
45 static struct hash *cpu_record = NULL;
46
47 static unsigned long
48 timeval_elapsed (struct timeval a, struct timeval b)
49 {
50 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
51 + (a.tv_usec - b.tv_usec));
52 }
53
54 static unsigned int
55 cpu_record_hash_key (struct cpu_thread_history *a)
56 {
57 return (uintptr_t) a->func;
58 }
59
60 static int
61 cpu_record_hash_cmp (const struct cpu_thread_history *a,
62 const struct cpu_thread_history *b)
63 {
64 return a->func == b->func;
65 }
66
67 static void *
68 cpu_record_hash_alloc (struct cpu_thread_history *a)
69 {
70 struct cpu_thread_history *new;
71 new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
72 new->func = a->func;
73 new->funcname = a->funcname;
74 return new;
75 }
76
77 static void
78 cpu_record_hash_free (void *a)
79 {
80 struct cpu_thread_history *hist = a;
81
82 XFREE (MTYPE_THREAD_STATS, hist);
83 }
84
85 static void
86 vty_out_cpu_thread_history(struct vty* vty,
87 struct cpu_thread_history *a)
88 {
89 vty_out(vty, "%5d %10ld.%03ld %9d %8ld %9ld %8ld %9ld",
90 a->total_active, a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
91 a->cpu.total/a->total_calls, a->cpu.max,
92 a->real.total/a->total_calls, a->real.max);
93 vty_out(vty, " %c%c%c%c%c%c %s%s",
94 a->types & (1 << THREAD_READ) ? 'R':' ',
95 a->types & (1 << THREAD_WRITE) ? 'W':' ',
96 a->types & (1 << THREAD_TIMER) ? 'T':' ',
97 a->types & (1 << THREAD_EVENT) ? 'E':' ',
98 a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
99 a->types & (1 << THREAD_BACKGROUND) ? 'B' : ' ',
100 a->funcname, VTY_NEWLINE);
101 }
102
103 static void
104 cpu_record_hash_print(struct hash_backet *bucket,
105 void *args[])
106 {
107 struct cpu_thread_history *totals = args[0];
108 struct vty *vty = args[1];
109 thread_type *filter = args[2];
110 struct cpu_thread_history *a = bucket->data;
111
112 if ( !(a->types & *filter) )
113 return;
114 vty_out_cpu_thread_history(vty,a);
115 totals->total_active += a->total_active;
116 totals->total_calls += a->total_calls;
117 totals->real.total += a->real.total;
118 if (totals->real.max < a->real.max)
119 totals->real.max = a->real.max;
120 totals->cpu.total += a->cpu.total;
121 if (totals->cpu.max < a->cpu.max)
122 totals->cpu.max = a->cpu.max;
123 }
124
125 static void
126 cpu_record_print(struct vty *vty, thread_type filter)
127 {
128 struct cpu_thread_history tmp;
129 void *args[3] = {&tmp, vty, &filter};
130
131 memset(&tmp, 0, sizeof tmp);
132 tmp.funcname = "TOTAL";
133 tmp.types = filter;
134
135 vty_out(vty, "%21s %18s %18s%s",
136 "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
137 vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
138 vty_out(vty, " Avg uSec Max uSecs");
139 vty_out(vty, " Type Thread%s", VTY_NEWLINE);
140
141 pthread_mutex_lock (&cpu_record_mtx);
142 {
143 hash_iterate(cpu_record,
144 (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
145 args);
146 }
147 pthread_mutex_unlock (&cpu_record_mtx);
148
149 if (tmp.total_calls > 0)
150 vty_out_cpu_thread_history(vty, &tmp);
151 }
152
153 DEFUN (show_thread_cpu,
154 show_thread_cpu_cmd,
155 "show thread cpu [FILTER]",
156 SHOW_STR
157 "Thread information\n"
158 "Thread CPU usage\n"
159 "Display filter (rwtexb)\n")
160 {
161 int idx_filter = 3;
162 int i = 0;
163 thread_type filter = (thread_type) -1U;
164
165 if (argc > 3)
166 {
167 filter = 0;
168 while (argv[idx_filter]->arg[i] != '\0')
169 {
170 switch ( argv[idx_filter]->arg[i] )
171 {
172 case 'r':
173 case 'R':
174 filter |= (1 << THREAD_READ);
175 break;
176 case 'w':
177 case 'W':
178 filter |= (1 << THREAD_WRITE);
179 break;
180 case 't':
181 case 'T':
182 filter |= (1 << THREAD_TIMER);
183 break;
184 case 'e':
185 case 'E':
186 filter |= (1 << THREAD_EVENT);
187 break;
188 case 'x':
189 case 'X':
190 filter |= (1 << THREAD_EXECUTE);
191 break;
192 case 'b':
193 case 'B':
194 filter |= (1 << THREAD_BACKGROUND);
195 break;
196 default:
197 break;
198 }
199 ++i;
200 }
201 if (filter == 0)
202 {
203 vty_out(vty, "Invalid filter \"%s\" specified,"
204 " must contain at least one of 'RWTEXB'%s",
205 argv[idx_filter]->arg, VTY_NEWLINE);
206 return CMD_WARNING;
207 }
208 }
209
210 cpu_record_print(vty, filter);
211 return CMD_SUCCESS;
212 }
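/*
 * Illustrative note (not part of the original source): the optional FILTER
 * argument is simply a string of the type letters shown in the output.
 * For example, from the vty one might narrow the display to timer and
 * write threads only:
 *
 *     show thread cpu tw
 *
 * and later reset just those counters with "clear thread cpu tw".
 */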
213
214 static void
215 cpu_record_hash_clear (struct hash_backet *bucket,
216 void *args)
217 {
218 thread_type *filter = args;
219 struct cpu_thread_history *a = bucket->data;
220
221 if ( !(a->types & *filter) )
222 return;
223
224 pthread_mutex_lock (&cpu_record_mtx);
225 {
226 hash_release (cpu_record, bucket->data);
227 }
228 pthread_mutex_unlock (&cpu_record_mtx);
229 }
230
231 static void
232 cpu_record_clear (thread_type filter)
233 {
234 thread_type *tmp = &filter;
235
236 pthread_mutex_lock (&cpu_record_mtx);
237 {
238 hash_iterate (cpu_record,
239 (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
240 tmp);
241 }
242 pthread_mutex_unlock (&cpu_record_mtx);
243 }
244
245 DEFUN (clear_thread_cpu,
246 clear_thread_cpu_cmd,
247 "clear thread cpu [FILTER]",
248 "Clear stored data\n"
249 "Thread information\n"
250 "Thread CPU usage\n"
251 "Display filter (rwtexb)\n")
252 {
253 int idx_filter = 3;
254 int i = 0;
255 thread_type filter = (thread_type) -1U;
256
257 if (argc > 3)
258 {
259 filter = 0;
260 while (argv[idx_filter]->arg[i] != '\0')
261 {
262 switch ( argv[idx_filter]->arg[i] )
263 {
264 case 'r':
265 case 'R':
266 filter |= (1 << THREAD_READ);
267 break;
268 case 'w':
269 case 'W':
270 filter |= (1 << THREAD_WRITE);
271 break;
272 case 't':
273 case 'T':
274 filter |= (1 << THREAD_TIMER);
275 break;
276 case 'e':
277 case 'E':
278 filter |= (1 << THREAD_EVENT);
279 break;
280 case 'x':
281 case 'X':
282 filter |= (1 << THREAD_EXECUTE);
283 break;
284 case 'b':
285 case 'B':
286 filter |= (1 << THREAD_BACKGROUND);
287 break;
288 default:
289 break;
290 }
291 ++i;
292 }
293 if (filter == 0)
294 {
295 vty_out(vty, "Invalid filter \"%s\" specified,"
296 " must contain at least one of 'RWTEXB'%s",
297 argv[idx_filter]->arg, VTY_NEWLINE);
298 return CMD_WARNING;
299 }
300 }
301
302 cpu_record_clear (filter);
303 return CMD_SUCCESS;
304 }
305
306 void
307 thread_cmd_init (void)
308 {
309 install_element (VIEW_NODE, &show_thread_cpu_cmd);
310 install_element (ENABLE_NODE, &clear_thread_cpu_cmd);
311 }
312
313 static int
314 thread_timer_cmp(void *a, void *b)
315 {
316 struct thread *thread_a = a;
317 struct thread *thread_b = b;
318
319 if (timercmp (&thread_a->u.sands, &thread_b->u.sands, <))
320 return -1;
321 if (timercmp (&thread_a->u.sands, &thread_b->u.sands, >))
322 return 1;
323 return 0;
324 }
325
326 static void
327 thread_timer_update(void *node, int actual_position)
328 {
329 struct thread *thread = node;
330
331 thread->index = actual_position;
332 }
333
334 /* Allocate new thread master. */
335 struct thread_master *
336 thread_master_create (void)
337 {
338 struct thread_master *rv;
339 struct rlimit limit;
340
341 getrlimit(RLIMIT_NOFILE, &limit);
342
343 pthread_mutex_lock (&cpu_record_mtx);
344 {
345 if (cpu_record == NULL)
346 cpu_record = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
347 (int (*) (const void *, const void *))
348 cpu_record_hash_cmp);
349 }
350 pthread_mutex_unlock (&cpu_record_mtx);
351
352 rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
353 if (rv == NULL)
354 return NULL;
355
356 pthread_mutex_init (&rv->mtx, NULL);
357
358 rv->fd_limit = (int)limit.rlim_cur;
359 rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
360 if (rv->read == NULL)
361 {
362 XFREE (MTYPE_THREAD_MASTER, rv);
363 return NULL;
364 }
365
366 rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
367 if (rv->write == NULL)
368 {
369 XFREE (MTYPE_THREAD, rv->read);
370 XFREE (MTYPE_THREAD_MASTER, rv);
371 return NULL;
372 }
373
374 /* Initialize the timer queues */
375 rv->timer = pqueue_create();
376 rv->background = pqueue_create();
377 rv->timer->cmp = rv->background->cmp = thread_timer_cmp;
378 rv->timer->update = rv->background->update = thread_timer_update;
379 rv->spin = true;
380 rv->handle_signals = true;
381
382 #if defined(HAVE_POLL_CALL)
383 rv->handler.pfdsize = rv->fd_limit;
384 rv->handler.pfdcount = 0;
385 rv->handler.pfds = XCALLOC (MTYPE_THREAD_MASTER,
386 sizeof (struct pollfd) * rv->handler.pfdsize);
387 #endif
388 return rv;
389 }
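/*
 * Illustrative sketch (not in the original source): the intended use of this
 * scheduler, assuming the thread_add_read()/thread_add_timer() convenience
 * macros from thread.h, is the classic fetch/call loop found in the daemons.
 * accept_cb, periodic_cb and listen_sock are hypothetical names.
 *
 *     struct thread_master *master = thread_master_create ();
 *     struct thread thread;
 *
 *     thread_add_read (master, accept_cb, NULL, listen_sock);
 *     thread_add_timer (master, periodic_cb, NULL, 5);    (fires in 5 seconds)
 *
 *     while (thread_fetch (master, &thread))
 *       thread_call (&thread);
 */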
390
391 /* Add a new thread to the list. */
392 static void
393 thread_list_add (struct thread_list *list, struct thread *thread)
394 {
395 thread->next = NULL;
396 thread->prev = list->tail;
397 if (list->tail)
398 list->tail->next = thread;
399 else
400 list->head = thread;
401 list->tail = thread;
402 list->count++;
403 }
404
405 /* Delete a thread from the list. */
406 static struct thread *
407 thread_list_delete (struct thread_list *list, struct thread *thread)
408 {
409 if (thread->next)
410 thread->next->prev = thread->prev;
411 else
412 list->tail = thread->prev;
413 if (thread->prev)
414 thread->prev->next = thread->next;
415 else
416 list->head = thread->next;
417 thread->next = thread->prev = NULL;
418 list->count--;
419 return thread;
420 }
421
422 static void
423 thread_delete_fd (struct thread **thread_array, struct thread *thread)
424 {
425 thread_array[thread->u.fd] = NULL;
426 }
427
428 static void
429 thread_add_fd (struct thread **thread_array, struct thread *thread)
430 {
431 thread_array[thread->u.fd] = thread;
432 }
433
434 /* Return 1 if the thread list is empty, 0 otherwise. */
435 static int
436 thread_empty (struct thread_list *list)
437 {
438 return list->head ? 0 : 1;
439 }
440
441 /* Delete top of the list and return it. */
442 static struct thread *
443 thread_trim_head (struct thread_list *list)
444 {
445 if (!thread_empty (list))
446 return thread_list_delete (list, list->head);
447 return NULL;
448 }
449
450 /* Move thread to unuse list. */
451 static void
452 thread_add_unuse (struct thread_master *m, struct thread *thread)
453 {
454 assert (m != NULL && thread != NULL);
455 assert (thread->next == NULL);
456 assert (thread->prev == NULL);
457
458 thread->type = THREAD_UNUSED;
459 thread->hist->total_active--;
460 thread_list_add (&m->unuse, thread);
461 }
462
463 /* Free all threads on the given list. */
464 static void
465 thread_list_free (struct thread_master *m, struct thread_list *list)
466 {
467 struct thread *t;
468 struct thread *next;
469
470 for (t = list->head; t; t = next)
471 {
472 next = t->next;
473 XFREE (MTYPE_THREAD, t);
474 list->count--;
475 m->alloc--;
476 }
477 }
478
479 static void
480 thread_array_free (struct thread_master *m, struct thread **thread_array)
481 {
482 struct thread *t;
483 int index;
484
485 for (index = 0; index < m->fd_limit; ++index)
486 {
487 t = thread_array[index];
488 if (t)
489 {
490 thread_array[index] = NULL;
491 XFREE (MTYPE_THREAD, t);
492 m->alloc--;
493 }
494 }
495 XFREE (MTYPE_THREAD, thread_array);
496 }
497
498 static void
499 thread_queue_free (struct thread_master *m, struct pqueue *queue)
500 {
501 int i;
502
503 for (i = 0; i < queue->size; i++)
504 XFREE(MTYPE_THREAD, queue->array[i]);
505
506 m->alloc -= queue->size;
507 pqueue_delete(queue);
508 }
509
510 /*
511 * thread_master_free_unused
512 *
513 * As threads are finished with, they are put on the
514 * unuse list for later reuse.
515 * If we are shutting down, free the unused threads
516 * so we can see whether we forgot to shut anything off.
517 */
518 void
519 thread_master_free_unused (struct thread_master *m)
520 {
521 pthread_mutex_lock (&m->mtx);
522 {
523 struct thread *t;
524 while ((t = thread_trim_head(&m->unuse)) != NULL)
525 {
526 pthread_mutex_destroy (&t->mtx);
527 XFREE(MTYPE_THREAD, t);
528 }
529 }
530 pthread_mutex_unlock (&m->mtx);
531 }
532
533 /* Stop thread scheduler. */
534 void
535 thread_master_free (struct thread_master *m)
536 {
537 thread_array_free (m, m->read);
538 thread_array_free (m, m->write);
539 thread_queue_free (m, m->timer);
540 thread_list_free (m, &m->event);
541 thread_list_free (m, &m->ready);
542 thread_list_free (m, &m->unuse);
543 thread_queue_free (m, m->background);
544 pthread_mutex_destroy (&m->mtx);
545
546 #if defined(HAVE_POLL_CALL)
547 XFREE (MTYPE_THREAD_MASTER, m->handler.pfds);
548 #endif
549 XFREE (MTYPE_THREAD_MASTER, m);
550
551 pthread_mutex_lock (&cpu_record_mtx);
552 {
553 if (cpu_record)
554 {
555 hash_clean (cpu_record, cpu_record_hash_free);
556 hash_free (cpu_record);
557 cpu_record = NULL;
558 }
559 }
560 pthread_mutex_unlock (&cpu_record_mtx);
561 }
562
563 /* Return remaining time in seconds. */
564 unsigned long
565 thread_timer_remain_second (struct thread *thread)
566 {
567 int64_t remain;
568
569 pthread_mutex_lock (&thread->mtx);
570 {
571 remain = monotime_until(&thread->u.sands, NULL) / 1000000LL;
572 }
573 pthread_mutex_unlock (&thread->mtx);
574
575 return remain < 0 ? 0 : remain;
576 }
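/*
 * Example (illustrative, not from the original source): a daemon that keeps
 * the struct thread * returned when it scheduled a timer can report how long
 * until that timer fires.  peer and t_holdtime are hypothetical names.
 *
 *     if (peer->t_holdtime)
 *       vty_out (vty, " Hold timer due in %lu seconds%s",
 *                thread_timer_remain_second (peer->t_holdtime), VTY_NEWLINE);
 */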
577
578 #define debugargdef const char *funcname, const char *schedfrom, int fromln
579 #define debugargpass funcname, schedfrom, fromln
580
581 struct timeval
582 thread_timer_remain(struct thread *thread)
583 {
584 struct timeval remain;
585 pthread_mutex_lock (&thread->mtx);
586 {
587 monotime_until(&thread->u.sands, &remain);
588 }
589 pthread_mutex_unlock (&thread->mtx);
590 return remain;
591 }
592
593 /* Get new thread. */
594 static struct thread *
595 thread_get (struct thread_master *m, u_char type,
596 int (*func) (struct thread *), void *arg, debugargdef)
597 {
598 struct thread *thread = thread_trim_head (&m->unuse);
599 struct cpu_thread_history tmp;
600
601 if (! thread)
602 {
603 thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
604 /* mutex only needs to be initialized at struct creation. */
605 pthread_mutex_init (&thread->mtx, NULL);
606 m->alloc++;
607 }
608
609 thread->type = type;
610 thread->add_type = type;
611 thread->master = m;
612 thread->arg = arg;
613 thread->index = -1;
614 thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
615
616 /*
617 * If the passed-in funcname or func is not what we have
618 * stored, then thread->hist needs to be updated.
619 * We keep the last one around in the unused thread
620 * under the assumption that we are probably
621 * going to immediately allocate the same
622 * type of thread again.
623 * This hopefully saves us some serious
624 * hash_get lookups.
625 */
626 if (thread->funcname != funcname ||
627 thread->func != func)
628 {
629 tmp.func = func;
630 tmp.funcname = funcname;
631 pthread_mutex_lock (&cpu_record_mtx);
632 {
633 thread->hist = hash_get (cpu_record, &tmp,
634 (void * (*) (void *))cpu_record_hash_alloc);
635 }
636 pthread_mutex_unlock (&cpu_record_mtx);
637 }
638 thread->hist->total_active++;
639 thread->func = func;
640 thread->funcname = funcname;
641 thread->schedfrom = schedfrom;
642 thread->schedfrom_line = fromln;
643
644 return thread;
645 }
646
647 #if defined (HAVE_POLL_CALL)
648
649 #define fd_copy_fd_set(X) (X)
650
651 /* generic add thread function */
652 static struct thread *
653 generic_thread_add(struct thread_master *m, int (*func) (struct thread *),
654 void *arg, int fd, int dir, debugargdef)
655 {
656 struct thread *thread;
657
658 u_char type;
659 short int event;
660
661 if (dir == THREAD_READ)
662 {
663 event = (POLLIN | POLLHUP);
664 type = THREAD_READ;
665 }
666 else
667 {
668 event = (POLLOUT | POLLHUP);
669 type = THREAD_WRITE;
670 }
671
672 nfds_t queuepos = m->handler.pfdcount;
673 nfds_t i=0;
674 for (i=0; i<m->handler.pfdcount; i++)
675 if (m->handler.pfds[i].fd == fd)
676 {
677 queuepos = i;
678 break;
679 }
680
681 /* is there enough space for a new fd? */
682 assert (queuepos < m->handler.pfdsize);
683
684 thread = thread_get (m, type, func, arg, debugargpass);
685 m->handler.pfds[queuepos].fd = fd;
686 m->handler.pfds[queuepos].events |= event;
687 if (queuepos == m->handler.pfdcount)
688 m->handler.pfdcount++;
689
690 return thread;
691 }
692 #else
693
694 #define fd_copy_fd_set(X) (X)
695 #endif
696
697 static int
698 fd_select (struct thread_master *m, int size, thread_fd_set *read, thread_fd_set *write, thread_fd_set *except, struct timeval *timer_wait)
699 {
700 int num;
701
702 /* If timer_wait is null here, that means either select() or poll() should
703 * block indefinitely, unless the thread_master has overridden it. select()
704 * and poll() differ in the timeout values they interpret as an indefinite
705 * block; select() requires a null pointer, while poll takes a millisecond
706 * value of -1.
707 *
708 * The thread_master owner has the option of overriding the default behavior
709 * by setting ->selectpoll_timeout. If the value is positive, it specifies
710 * the maximum number of milliseconds to wait. If the timeout is -1, it
711 * specifies that we should never wait and always return immediately even if
712 * no event is detected. If the value is zero, the default behavior applies.
713 */
714
715 #if defined(HAVE_POLL_CALL)
716 int timeout = -1;
717
718 if (timer_wait != NULL && m->selectpoll_timeout == 0) // use the default value
719 timeout = (timer_wait->tv_sec*1000) + (timer_wait->tv_usec/1000);
720 else if (m->selectpoll_timeout > 0) // use the user's timeout
721 timeout = m->selectpoll_timeout;
722 else if (m->selectpoll_timeout < 0) // effect a poll (return immediately)
723 timeout = 0;
724
725 num = poll (m->handler.pfds, m->handler.pfdcount + m->handler.pfdcountsnmp, timeout);
726 #else
727 struct timeval timeout;
728 if (m->selectpoll_timeout > 0) // use the user's timeout
729 {
730 timeout.tv_sec = m->selectpoll_timeout / 1000;
731 timeout.tv_usec = (m->selectpoll_timeout % 1000) * 1000;
732 timer_wait = &timeout;
733 }
734 else if (m->selectpoll_timeout < 0) // effect a poll (return immediately)
735 {
736 timeout.tv_sec = 0;
737 timeout.tv_usec = 0;
738 timer_wait = &timeout;
739 }
740 num = select (size, read, write, except, timer_wait);
741 #endif
742
743 return num;
744 }
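/*
 * Illustrative sketch (not in the original source): per the comment above,
 * the owner of a thread_master can override the block-forever default by
 * setting selectpoll_timeout before running the fetch loop.
 *
 *     master->selectpoll_timeout = 100;    positive: wait at most 100 ms
 *     master->selectpoll_timeout = -1;     negative: never block, poll once
 *     master->selectpoll_timeout = 0;      zero: default behavior
 */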
745
746 static int
747 fd_is_set (struct thread *thread, thread_fd_set *fdset, int pos)
748 {
749 #if defined(HAVE_POLL_CALL)
750 return 1;
751 #else
752 return FD_ISSET (THREAD_FD (thread), fdset);
753 #endif
754 }
755
756 static int
757 fd_clear_read_write (struct thread *thread)
758 {
759 #if !defined(HAVE_POLL_CALL)
760 thread_fd_set *fdset = NULL;
761 int fd = THREAD_FD (thread);
762
763 if (thread->type == THREAD_READ)
764 fdset = &thread->master->handler.readfd;
765 else
766 fdset = &thread->master->handler.writefd;
767
768 if (!FD_ISSET (fd, fdset))
769 return 0;
770
771 FD_CLR (fd, fdset);
772 #endif
773 return 1;
774 }
775
776 /* Add new read or write thread. */
777 struct thread *
778 funcname_thread_add_read_write (int dir, struct thread_master *m,
779 int (*func) (struct thread *), void *arg, int fd,
780 debugargdef)
781 {
782 struct thread *thread = NULL;
783
784 pthread_mutex_lock (&m->mtx);
785 {
786 #if defined (HAVE_POLL_CALL)
787 thread = generic_thread_add(m, func, arg, fd, dir, debugargpass);
788 #else
789 if (fd >= FD_SETSIZE)
790 {
791 zlog_err ("File descriptor %d is >= FD_SETSIZE (%d). Please recompile"
792 " with --enable-poll=yes", fd, FD_SETSIZE);
793 assert (fd < FD_SETSIZE && !"fd >= FD_SETSIZE");
794 }
795 thread_fd_set *fdset = NULL;
796 if (dir == THREAD_READ)
797 fdset = &m->handler.readfd;
798 else
799 fdset = &m->handler.writefd;
800
801 if (FD_ISSET (fd, fdset))
802 {
803 zlog_warn ("There is already %s fd [%d]",
804 (dir == THREAD_READ) ? "read" : "write", fd);
805 }
806 else
807 {
808 FD_SET (fd, fdset);
809 thread = thread_get (m, dir, func, arg, debugargpass);
810 }
811 #endif
812
813 if (thread)
814 {
815 pthread_mutex_lock (&thread->mtx);
816 {
817 thread->u.fd = fd;
818 if (dir == THREAD_READ)
819 thread_add_fd (m->read, thread);
820 else
821 thread_add_fd (m->write, thread);
822 }
823 pthread_mutex_unlock (&thread->mtx);
824 }
825 }
826 pthread_mutex_unlock (&m->mtx);
827
828 return thread;
829 }
830
831 static struct thread *
832 funcname_thread_add_timer_timeval (struct thread_master *m,
833 int (*func) (struct thread *),
834 int type,
835 void *arg,
836 struct timeval *time_relative,
837 debugargdef)
838 {
839 struct thread *thread;
840 struct pqueue *queue;
841
842 assert (m != NULL);
843
844 assert (type == THREAD_TIMER || type == THREAD_BACKGROUND);
845 assert (time_relative);
846
847 pthread_mutex_lock (&m->mtx);
848 {
849 queue = ((type == THREAD_TIMER) ? m->timer : m->background);
850 thread = thread_get (m, type, func, arg, debugargpass);
851
852 pthread_mutex_lock (&thread->mtx);
853 {
854 monotime(&thread->u.sands);
855 timeradd(&thread->u.sands, time_relative, &thread->u.sands);
856 pqueue_enqueue(thread, queue);
857 }
858 pthread_mutex_unlock (&thread->mtx);
859 }
860 pthread_mutex_unlock (&m->mtx);
861
862 return thread;
863 }
864
865
866 /* Add timer event thread. */
867 struct thread *
868 funcname_thread_add_timer (struct thread_master *m,
869 int (*func) (struct thread *),
870 void *arg, long timer,
871 debugargdef)
872 {
873 struct timeval trel;
874
875 assert (m != NULL);
876
877 trel.tv_sec = timer;
878 trel.tv_usec = 0;
879
880 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg,
881 &trel, debugargpass);
882 }
883
884 /* Add timer event thread with "millisecond" resolution */
885 struct thread *
886 funcname_thread_add_timer_msec (struct thread_master *m,
887 int (*func) (struct thread *),
888 void *arg, long timer,
889 debugargdef)
890 {
891 struct timeval trel;
892
893 assert (m != NULL);
894
895 trel.tv_sec = timer / 1000;
896 trel.tv_usec = 1000*(timer % 1000);
897
898 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
899 arg, &trel, debugargpass);
900 }
901
902 /* Add timer event thread, with the interval given as a struct timeval */
903 struct thread *
904 funcname_thread_add_timer_tv (struct thread_master *m,
905 int (*func) (struct thread *),
906 void *arg, struct timeval *tv,
907 debugargdef)
908 {
909 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
910 arg, tv, debugargpass);
911 }
912
913 /* Add a background thread, with an optional millisec delay */
914 struct thread *
915 funcname_thread_add_background (struct thread_master *m,
916 int (*func) (struct thread *),
917 void *arg, long delay,
918 debugargdef)
919 {
920 struct timeval trel;
921
922 assert (m != NULL);
923
924 if (delay)
925 {
926 trel.tv_sec = delay / 1000;
927 trel.tv_usec = 1000*(delay % 1000);
928 }
929 else
930 {
931 trel.tv_sec = 0;
932 trel.tv_usec = 0;
933 }
934
935 return funcname_thread_add_timer_timeval (m, func, THREAD_BACKGROUND,
936 arg, &trel, debugargpass);
937 }
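/*
 * Example (illustrative only): callers normally reach these functions through
 * the wrapper macros in thread.h, which fill in the debug arguments; their use
 * is assumed here.  reconnect_cb, flush_cb, peer and rib are hypothetical.
 *
 *     thread_add_timer (master, reconnect_cb, peer, 10);         10 second timer
 *     thread_add_timer_msec (master, reconnect_cb, peer, 250);   250 ms timer
 *     thread_add_background (master, flush_cb, rib, 50);         low priority, >= 50 ms
 */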
938
939 /* Add simple event thread. */
940 struct thread *
941 funcname_thread_add_event (struct thread_master *m,
942 int (*func) (struct thread *), void *arg, int val,
943 debugargdef)
944 {
945 struct thread *thread;
946
947 assert (m != NULL);
948
949 pthread_mutex_lock (&m->mtx);
950 {
951 thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
952 pthread_mutex_lock (&thread->mtx);
953 {
954 thread->u.val = val;
955 thread_list_add (&m->event, thread);
956 }
957 pthread_mutex_unlock (&thread->mtx);
958 }
959 pthread_mutex_unlock (&m->mtx);
960
961 return thread;
962 }
963
964 static void
965 thread_cancel_read_or_write (struct thread *thread, short int state)
966 {
967 #if defined(HAVE_POLL_CALL)
968 nfds_t i;
969
970 for (i=0;i<thread->master->handler.pfdcount;++i)
971 if (thread->master->handler.pfds[i].fd == thread->u.fd)
972 {
973 thread->master->handler.pfds[i].events &= ~(state);
974
975 /* remove thread fds from pfd list */
976 if (thread->master->handler.pfds[i].events == 0)
977 {
978 memmove(thread->master->handler.pfds+i,
979 thread->master->handler.pfds+i+1,
980 (thread->master->handler.pfdsize-i-1) * sizeof(struct pollfd));
981 thread->master->handler.pfdcount--;
982 return;
983 }
984 }
985 #endif
986
987 fd_clear_read_write (thread);
988 }
989
990 /**
991 * Cancel thread from scheduler.
992 *
993 * This function is *NOT* MT-safe. DO NOT call it from any other pthread except
994 * the one which owns thread->master.
995 */
996 void
997 thread_cancel (struct thread *thread)
998 {
999 struct thread_list *list = NULL;
1000 struct pqueue *queue = NULL;
1001 struct thread **thread_array = NULL;
1002
1003 pthread_mutex_lock (&thread->master->mtx);
1004 pthread_mutex_lock (&thread->mtx);
1005
1006 switch (thread->type)
1007 {
1008 case THREAD_READ:
1009 #if defined (HAVE_POLL_CALL)
1010 thread_cancel_read_or_write (thread, POLLIN | POLLHUP);
1011 #else
1012 thread_cancel_read_or_write (thread, 0);
1013 #endif
1014 thread_array = thread->master->read;
1015 break;
1016 case THREAD_WRITE:
1017 #if defined (HAVE_POLL_CALL)
1018 thread_cancel_read_or_write (thread, POLLOUT | POLLHUP);
1019 #else
1020 thread_cancel_read_or_write (thread, 0);
1021 #endif
1022 thread_array = thread->master->write;
1023 break;
1024 case THREAD_TIMER:
1025 queue = thread->master->timer;
1026 break;
1027 case THREAD_EVENT:
1028 list = &thread->master->event;
1029 break;
1030 case THREAD_READY:
1031 list = &thread->master->ready;
1032 break;
1033 case THREAD_BACKGROUND:
1034 queue = thread->master->background;
1035 break;
1036 default:
1037 goto done;
1038 break;
1039 }
1040
1041 if (queue)
1042 {
1043 assert(thread->index >= 0);
1044 pqueue_remove (thread, queue);
1045 }
1046 else if (list)
1047 {
1048 thread_list_delete (list, thread);
1049 }
1050 else if (thread_array)
1051 {
1052 thread_delete_fd (thread_array, thread);
1053 }
1054 else
1055 {
1056 assert(!"Thread should be either in queue or list or array!");
1057 }
1058
1059 thread_add_unuse (thread->master, thread);
1060
1061 done:
1062 pthread_mutex_unlock (&thread->mtx);
1063 pthread_mutex_unlock (&thread->master->mtx);
1064 }
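/*
 * Illustrative usage (not part of the original source): since a thread may
 * already have run and been recycled, callers conventionally keep the pointer
 * returned by the thread_add_* call and clear it both in the callback and when
 * cancelling.  peer and t_connect are hypothetical names.
 *
 *     if (peer->t_connect)
 *       {
 *         thread_cancel (peer->t_connect);
 *         peer->t_connect = NULL;
 *       }
 */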
1065
1066 /* Delete all events which have argument value arg. */
1067 unsigned int
1068 thread_cancel_event (struct thread_master *m, void *arg)
1069 {
1070 unsigned int ret = 0;
1071 struct thread *thread;
1072 struct thread *t;
1073
1074 pthread_mutex_lock (&m->mtx);
1075 {
1076 thread = m->event.head;
1077 while (thread)
1078 {
1079 t = thread;
1080 pthread_mutex_lock (&t->mtx);
1081 {
1082 thread = t->next;
1083
1084 if (t->arg == arg)
1085 {
1086 ret++;
1087 thread_list_delete (&m->event, t);
1088 thread_add_unuse (m, t);
1089 }
1090 }
1091 pthread_mutex_unlock (&t->mtx);
1092 }
1093
1094 /* thread can be on the ready list too */
1095 thread = m->ready.head;
1096 while (thread)
1097 {
1098 t = thread;
1099 pthread_mutex_lock (&t->mtx);
1100 {
1101 thread = t->next;
1102
1103 if (t->arg == arg)
1104 {
1105 ret++;
1106 thread_list_delete (&m->ready, t);
1107 thread_add_unuse (m, t);
1108 }
1109 }
1110 pthread_mutex_unlock (&t->mtx);
1111 }
1112 }
1113 pthread_mutex_unlock (&m->mtx);
1114 return ret;
1115 }
1116
1117 static struct timeval *
1118 thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
1119 {
1120 if (queue->size)
1121 {
1122 struct thread *next_timer = queue->array[0];
1123 monotime_until(&next_timer->u.sands, timer_val);
1124 return timer_val;
1125 }
1126 return NULL;
1127 }
1128
1129 static struct thread *
1130 thread_run (struct thread_master *m, struct thread *thread,
1131 struct thread *fetch)
1132 {
1133 *fetch = *thread;
1134 thread_add_unuse (m, thread);
1135 return fetch;
1136 }
1137
1138 static int
1139 thread_process_fds_helper (struct thread_master *m, struct thread *thread, thread_fd_set *fdset, short int state, int pos)
1140 {
1141 struct thread **thread_array;
1142
1143 if (!thread)
1144 return 0;
1145
1146 if (thread->type == THREAD_READ)
1147 thread_array = m->read;
1148 else
1149 thread_array = m->write;
1150
1151 if (fd_is_set (thread, fdset, pos))
1152 {
1153 fd_clear_read_write (thread);
1154 thread_delete_fd (thread_array, thread);
1155 thread_list_add (&m->ready, thread);
1156 thread->type = THREAD_READY;
1157 #if defined(HAVE_POLL_CALL)
1158 thread->master->handler.pfds[pos].events &= ~(state);
1159 #endif
1160 return 1;
1161 }
1162 return 0;
1163 }
1164
1165 #if defined(HAVE_POLL_CALL)
1166
1167 /* check poll events */
1168 static void
1169 check_pollfds(struct thread_master *m, fd_set *readfd, int num)
1170 {
1171 nfds_t i = 0;
1172 int ready = 0;
1173 for (i = 0; i < m->handler.pfdcount && ready < num ; ++i)
1174 {
1175 /* no event for current fd? immediately continue */
1176 if(m->handler.pfds[i].revents == 0)
1177 continue;
1178
1179 ready++;
1180
1181 /* POLLIN / POLLOUT process event */
1182 if (m->handler.pfds[i].revents & POLLIN)
1183 thread_process_fds_helper(m, m->read[m->handler.pfds[i].fd], NULL, POLLIN, i);
1184 if (m->handler.pfds[i].revents & POLLOUT)
1185 thread_process_fds_helper(m, m->write[m->handler.pfds[i].fd], NULL, POLLOUT, i);
1186
1187 /* remove fd from list on POLLNVAL or POLLHUP */
1188 if (m->handler.pfds[i].revents & POLLNVAL ||
1189 m->handler.pfds[i].revents & POLLHUP)
1190 {
1191 memmove(m->handler.pfds+i,
1192 m->handler.pfds+i+1,
1193 (m->handler.pfdsize-i-1) * sizeof(struct pollfd));
1194 m->handler.pfdcount--;
1195 i--;
1196 }
1197 else
1198 m->handler.pfds[i].revents = 0;
1199 }
1200 }
1201 #endif
1202
1203 static void
1204 thread_process_fds (struct thread_master *m, thread_fd_set *rset, thread_fd_set *wset, int num)
1205 {
1206 #if defined (HAVE_POLL_CALL)
1207 check_pollfds (m, rset, num);
1208 #else
1209 int ready = 0, index;
1210
1211 for (index = 0; index < m->fd_limit && ready < num; ++index)
1212 {
1213 ready += thread_process_fds_helper (m, m->read[index], rset, 0, 0);
1214 ready += thread_process_fds_helper (m, m->write[index], wset, 0, 0);
1215 }
1216 #endif
1217 }
1218
1219 /* Add all timers that have popped to the ready list. */
1220 static unsigned int
1221 thread_timer_process (struct pqueue *queue, struct timeval *timenow)
1222 {
1223 struct thread *thread;
1224 unsigned int ready = 0;
1225
1226 while (queue->size)
1227 {
1228 thread = queue->array[0];
1229 if (timercmp (timenow, &thread->u.sands, <))
1230 return ready;
1231 pqueue_dequeue(queue);
1232 thread->type = THREAD_READY;
1233 thread_list_add (&thread->master->ready, thread);
1234 ready++;
1235 }
1236 return ready;
1237 }
1238
1239 /* process a list en masse, e.g. for event thread lists */
1240 static unsigned int
1241 thread_process (struct thread_list *list)
1242 {
1243 struct thread *thread;
1244 struct thread *next;
1245 unsigned int ready = 0;
1246
1247 for (thread = list->head; thread; thread = next)
1248 {
1249 next = thread->next;
1250 thread_list_delete (list, thread);
1251 thread->type = THREAD_READY;
1252 thread_list_add (&thread->master->ready, thread);
1253 ready++;
1254 }
1255 return ready;
1256 }
1257
1258
1259 /* Fetch next ready thread. */
1260 struct thread *
1261 thread_fetch (struct thread_master *m, struct thread *fetch)
1262 {
1263 struct thread *thread;
1264 thread_fd_set readfd;
1265 thread_fd_set writefd;
1266 thread_fd_set exceptfd;
1267 struct timeval now;
1268 struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
1269 struct timeval timer_val_bg;
1270 struct timeval *timer_wait = &timer_val;
1271 struct timeval *timer_wait_bg;
1272
1273 do
1274 {
1275 int num = 0;
1276
1277 /* Signals pre-empt everything */
1278 if (m->handle_signals)
1279 quagga_sigevent_process ();
1280
1281 pthread_mutex_lock (&m->mtx);
1282 /* Drain the ready queue of already scheduled jobs, before scheduling
1283 * more.
1284 */
1285 if ((thread = thread_trim_head (&m->ready)) != NULL)
1286 {
1287 fetch = thread_run (m, thread, fetch);
1288 pthread_mutex_unlock (&m->mtx);
1289 return fetch;
1290 }
1291
1292 /* To be fair to all kinds of threads, and avoid starvation, we
1293 * need to be careful to consider all thread types for scheduling
1294 * in each quanta. I.e. we should not return early from here on.
1295 */
1296
1297 /* Normal event are the next highest priority. */
1298 thread_process (&m->event);
1299
1300 /* Structure copy. */
1301 #if !defined(HAVE_POLL_CALL)
1302 readfd = fd_copy_fd_set(m->handler.readfd);
1303 writefd = fd_copy_fd_set(m->handler.writefd);
1304 exceptfd = fd_copy_fd_set(m->handler.exceptfd);
1305 #endif
1306
1307 /* Calculate select wait timer if nothing else to do */
1308 if (m->ready.count == 0)
1309 {
1310 timer_wait = thread_timer_wait (m->timer, &timer_val);
1311 timer_wait_bg = thread_timer_wait (m->background, &timer_val_bg);
1312
1313 if (timer_wait_bg &&
1314 (!timer_wait || (timercmp (timer_wait, timer_wait_bg, >))))
1315 timer_wait = timer_wait_bg;
1316 }
1317
1318 if (timer_wait && timer_wait->tv_sec < 0)
1319 {
1320 timerclear(&timer_val);
1321 timer_wait = &timer_val;
1322 }
1323
1324 num = fd_select (m, FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);
1325
1326 /* Signals should get quick treatment */
1327 if (num < 0)
1328 {
1329 if (errno == EINTR)
1330 {
1331 pthread_mutex_unlock (&m->mtx);
1332 continue; /* signal received - process it */
1333 }
1334 zlog_warn ("select() error: %s", safe_strerror (errno));
1335 pthread_mutex_unlock (&m->mtx);
1336 return NULL;
1337 }
1338
1339 /* Check foreground timers. Historically, they have had higher
1340 priority than I/O threads, so let's push them onto the ready
1341 list in front of the I/O threads. */
1342 monotime(&now);
1343 thread_timer_process (m->timer, &now);
1344
1345 /* Got IO, process it */
1346 if (num > 0)
1347 thread_process_fds (m, &readfd, &writefd, num);
1348
1349 #if 0
1350 /* If any threads were made ready above (I/O or foreground timer),
1351 perhaps we should avoid adding background timers to the ready
1352 list at this time. If this code is uncommented, then background
1353 timer threads will not run unless there is nothing else to do. */
1354 if ((thread = thread_trim_head (&m->ready)) != NULL)
1355 {
1356 fetch = thread_run (m, thread, fetch);
1357 pthread_mutex_unlock (&m->mtx);
1358 return fetch;
1359 }
1360 #endif
1361
1362 /* Background timer/events, lowest priority */
1363 thread_timer_process (m->background, &now);
1364
1365 if ((thread = thread_trim_head (&m->ready)) != NULL)
1366 {
1367 fetch = thread_run (m, thread, fetch);
1368 pthread_mutex_unlock (&m->mtx);
1369 return fetch;
1370 }
1371
1372 pthread_mutex_unlock (&m->mtx);
1373
1374 } while (m->spin);
1375
1376 return NULL;
1377 }
1378
1379 unsigned long
1380 thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
1381 {
1382 /* This is 'user + sys' time. */
1383 *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
1384 timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
1385 return timeval_elapsed (now->real, start->real);
1386 }
1387
1388 /* We should aim to yield after yield milliseconds, which defaults
1389 to THREAD_YIELD_TIME_SLOT.
1390 Note: we are using real (wall clock) time for this calculation.
1391 It could be argued that CPU time may make more sense in certain
1392 contexts. The things to consider are whether the thread may have
1393 blocked (in which case wall time increases, but CPU time does not),
1394 or whether the system is heavily loaded with other processes competing
1395 for CPU time. On balance, wall clock time seems to make sense.
1396 Plus it has the added benefit that gettimeofday should be faster
1397 than calling getrusage. */
1398 int
1399 thread_should_yield (struct thread *thread)
1400 {
1401 int result;
1402 pthread_mutex_lock (&thread->mtx);
1403 {
1404 result = monotime_since(&thread->real, NULL) > (int64_t)thread->yield;
1405 }
1406 pthread_mutex_unlock (&thread->mtx);
1407 return result;
1408 }
1409
1410 void
1411 thread_set_yield_time (struct thread *thread, unsigned long yield_time)
1412 {
1413 pthread_mutex_lock (&thread->mtx);
1414 {
1415 thread->yield = yield_time;
1416 }
1417 pthread_mutex_unlock (&thread->mtx);
1418 }
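/*
 * Illustrative sketch (not in the original source): a long-running job can
 * use thread_should_yield() to cooperate with the scheduler, rescheduling
 * itself instead of hogging the process.  This assumes the THREAD_ARG() and
 * thread_add_background() wrappers from thread.h; work_queue_run_cb, wq,
 * work_remains and do_one_unit are hypothetical names.
 *
 *     static int
 *     work_queue_run_cb (struct thread *t)
 *     {
 *       struct work_queue *wq = THREAD_ARG (t);
 *
 *       while (work_remains (wq))
 *         {
 *           do_one_unit (wq);
 *           if (thread_should_yield (t))
 *             break;                         stop and let other threads run
 *         }
 *       if (work_remains (wq))
 *         thread_add_background (t->master, work_queue_run_cb, wq, 0);
 *       return 0;
 *     }
 */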
1419
1420 void
1421 thread_getrusage (RUSAGE_T *r)
1422 {
1423 monotime(&r->real);
1424 getrusage(RUSAGE_SELF, &(r->cpu));
1425 }
1426
1427 struct thread *thread_current = NULL;
1428
1429 /* We check thread consumed time. If the system has getrusage, we'll
1430 use that to get in-depth stats on the performance of the thread in addition
1431 to wall clock time stats from gettimeofday. */
1432 void
1433 thread_call (struct thread *thread)
1434 {
1435 unsigned long realtime, cputime;
1436 RUSAGE_T before, after;
1437
1438 GETRUSAGE (&before);
1439 thread->real = before.real;
1440
1441 thread_current = thread;
1442 (*thread->func) (thread);
1443 thread_current = NULL;
1444
1445 GETRUSAGE (&after);
1446
1447 realtime = thread_consumed_time (&after, &before, &cputime);
1448 thread->hist->real.total += realtime;
1449 if (thread->hist->real.max < realtime)
1450 thread->hist->real.max = realtime;
1451 thread->hist->cpu.total += cputime;
1452 if (thread->hist->cpu.max < cputime)
1453 thread->hist->cpu.max = cputime;
1454
1455 ++(thread->hist->total_calls);
1456 thread->hist->types |= (1 << thread->add_type);
1457
1458 #ifdef CONSUMED_TIME_CHECK
1459 if (realtime > CONSUMED_TIME_CHECK)
1460 {
1461 /*
1462 * We have a CPU Hog on our hands.
1463 * Whinge about it now, so we're aware this is yet another task
1464 * to fix.
1465 */
1466 zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
1467 thread->funcname,
1468 (unsigned long) thread->func,
1469 realtime/1000, cputime/1000);
1470 }
1471 #endif /* CONSUMED_TIME_CHECK */
1472 }
1473
1474 /* Execute thread */
1475 struct thread *
1476 funcname_thread_execute (struct thread_master *m,
1477 int (*func)(struct thread *),
1478 void *arg,
1479 int val,
1480 debugargdef)
1481 {
1482 struct cpu_thread_history tmp;
1483 struct thread dummy;
1484
1485 memset (&dummy, 0, sizeof (struct thread));
1486
1487 pthread_mutex_init (&dummy.mtx, NULL);
1488 dummy.type = THREAD_EVENT;
1489 dummy.add_type = THREAD_EXECUTE;
1490 dummy.master = NULL;
1491 dummy.arg = arg;
1492 dummy.u.val = val;
1493
1494 tmp.func = dummy.func = func;
1495 tmp.funcname = dummy.funcname = funcname;
1496 pthread_mutex_lock (&cpu_record_mtx);
1497 {
1498 dummy.hist = hash_get (cpu_record, &tmp,
1499 (void * (*) (void *))cpu_record_hash_alloc);
1500 }
1501 pthread_mutex_unlock (&cpu_record_mtx);
1502
1503 dummy.schedfrom = schedfrom;
1504 dummy.schedfrom_line = fromln;
1505
1506 thread_call (&dummy);
1507
1508 return NULL;
1509 }