]> git.proxmox.com Git - mirror_frr.git/blame - lib/thread.c
Merge remote-tracking branch 'origin/master' into EIGRP
[mirror_frr.git] / lib / thread.c
CommitLineData
718e3744 1/* Thread management routine
2 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2, or (at your option) any
9 * later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with GNU Zebra; see the file COPYING. If not, write to the Free
18 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 */
21
22/* #define DEBUG */
23
24#include <zebra.h>
308d14ae 25#include <sys/resource.h>
718e3744 26
27#include "thread.h"
28#include "memory.h"
29#include "log.h"
e04ab74d 30#include "hash.h"
4becea72 31#include "pqueue.h"
e04ab74d 32#include "command.h"
05c447dd 33#include "sigevent.h"
d6be5fb9 34
4a1ab8e4
DL
35DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
36DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
37DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
38
3b96b781
HT
39#if defined(__APPLE__)
40#include <mach/mach.h>
41#include <mach/mach_time.h>
42#endif
43
db9c0df9 44/* Relative time, since startup */
e04ab74d 45static struct hash *cpu_record = NULL;
6b0655a2 46
816c2194 47static unsigned long
718e3744 48timeval_elapsed (struct timeval a, struct timeval b)
49{
50 return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
51 + (a.tv_usec - b.tv_usec));
52}
6b0655a2 53
/* Hash key for a CPU-stats record: the thread function's address
 * uniquely identifies the bucket. */
static unsigned int
cpu_record_hash_key (struct cpu_thread_history *a)
{
  return (uintptr_t) a->func;
}
59
/* Hash equality callback: two records belong to the same stats bucket
 * iff they track the same thread function. */
static int
cpu_record_hash_cmp (const struct cpu_thread_history *a,
		     const struct cpu_thread_history *b)
{
  return a->func == b->func;
}
66
/* hash_get() allocator: create a fresh stats record, copying only the
 * key fields (func/funcname); counters start zeroed via XCALLOC. */
static void *
cpu_record_hash_alloc (struct cpu_thread_history *a)
{
  struct cpu_thread_history *new;
  new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
  new->func = a->func;
  new->funcname = a->funcname;
  return new;
}
76
/* hash_clean() callback: free one stats record.  funcname is a static
 * string supplied by the scheduling macros and is not freed here. */
static void
cpu_record_hash_free (void *a)
{
  struct cpu_thread_history *hist = a;

  XFREE (MTYPE_THREAD_STATS, hist);
}
84
f63f06da 85static void
e04ab74d 86vty_out_cpu_thread_history(struct vty* vty,
87 struct cpu_thread_history *a)
88{
f7c62e11
DS
89 vty_out(vty, "%5d %10ld.%03ld %9d %8ld %9ld %8ld %9ld",
90 a->total_active, a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
8b70d0b0 91 a->cpu.total/a->total_calls, a->cpu.max,
92 a->real.total/a->total_calls, a->real.max);
8b70d0b0 93 vty_out(vty, " %c%c%c%c%c%c %s%s",
e04ab74d 94 a->types & (1 << THREAD_READ) ? 'R':' ',
95 a->types & (1 << THREAD_WRITE) ? 'W':' ',
96 a->types & (1 << THREAD_TIMER) ? 'T':' ',
97 a->types & (1 << THREAD_EVENT) ? 'E':' ',
98 a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
a48b4e6d 99 a->types & (1 << THREAD_BACKGROUND) ? 'B' : ' ',
e04ab74d 100 a->funcname, VTY_NEWLINE);
101}
102
/* hash_iterate() callback: print one record (if it matches the type
 * filter) and fold its counters into the running totals.
 * args = { totals record, vty, thread-type filter }. */
static void
cpu_record_hash_print(struct hash_backet *bucket,
		      void *args[])
{
  struct cpu_thread_history *totals = args[0];
  struct vty *vty = args[1];
  thread_type *filter = args[2];
  struct cpu_thread_history *a = bucket->data;

  /* Skip records whose thread types were all filtered out. */
  if ( !(a->types & *filter) )
    return;
  vty_out_cpu_thread_history(vty,a);
  /* Sums accumulate; maxima take the larger of the two. */
  totals->total_active += a->total_active;
  totals->total_calls += a->total_calls;
  totals->real.total += a->real.total;
  if (totals->real.max < a->real.max)
    totals->real.max = a->real.max;
  totals->cpu.total += a->cpu.total;
  if (totals->cpu.max < a->cpu.max)
    totals->cpu.max = a->cpu.max;
}
124
/* Print the CPU-usage table for all recorded thread functions matching
 * 'filter', followed by a TOTAL row when anything was recorded. */
static void
cpu_record_print(struct vty *vty, thread_type filter)
{
  struct cpu_thread_history tmp;
  void *args[3] = {&tmp, vty, &filter};

  memset(&tmp, 0, sizeof tmp);
  tmp.funcname = "TOTAL";
  tmp.types = filter;

  vty_out(vty, "%21s %18s %18s%s",
	  "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
  vty_out(vty, "Active   Runtime(ms)   Invoked Avg uSec Max uSecs");
  vty_out(vty, " Avg uSec Max uSecs");
  vty_out(vty, "  Type  Thread%s", VTY_NEWLINE);
  /* Each iteration prints a row and accumulates into tmp. */
  hash_iterate(cpu_record,
	       (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
	       args);

  if (tmp.total_calls > 0)
    vty_out_cpu_thread_history(vty, &tmp);
}
147
/* CLI: "show thread cpu [FILTER]" — display per-function CPU usage.
 * FILTER is a string of type letters (rwtexb, case-insensitive); with
 * no argument all thread types are shown. */
DEFUN (show_thread_cpu,
       show_thread_cpu_cmd,
       "show thread cpu [FILTER]",
       SHOW_STR
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
  int idx_filter = 3;
  int i = 0;
  /* Default: all bits set, i.e. every thread type passes. */
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      /* Translate each filter letter into its thread-type bit. */
      while (argv[idx_filter]->arg[i] != '\0')
	{
	  switch ( argv[idx_filter]->arg[i] )
	    {
	    case 'r':
	    case 'R':
	      filter |= (1 << THREAD_READ);
	      break;
	    case 'w':
	    case 'W':
	      filter |= (1 << THREAD_WRITE);
	      break;
	    case 't':
	    case 'T':
	      filter |= (1 << THREAD_TIMER);
	      break;
	    case 'e':
	    case 'E':
	      filter |= (1 << THREAD_EVENT);
	      break;
	    case 'x':
	    case 'X':
	      filter |= (1 << THREAD_EXECUTE);
	      break;
	    case 'b':
	    case 'B':
	      filter |= (1 << THREAD_BACKGROUND);
	      break;
	    default:
	      /* Unknown letters are silently ignored. */
	      break;
	    }
	  ++i;
	}
      if (filter == 0)
	{
	  vty_out(vty, "Invalid filter \"%s\" specified,"
		  " must contain at least one of 'RWTEXB'%s",
		  argv[idx_filter]->arg, VTY_NEWLINE);
	  return CMD_WARNING;
	}
    }

  cpu_record_print(vty, filter);
  return CMD_SUCCESS;
}
e276eb82
PJ
208
/* hash_iterate() callback: release records matching the type filter.
 * NOTE(review): this calls hash_release() on the bucket currently
 * being iterated — relies on hash_iterate() tolerating removal of the
 * current bucket; confirm against lib/hash.c before changing. */
static void
cpu_record_hash_clear (struct hash_backet *bucket,
		       void *args)
{
  thread_type *filter = args;
  struct cpu_thread_history *a = bucket->data;

  if ( !(a->types & *filter) )
    return;

  hash_release (cpu_record, bucket->data);
}
221
222static void
223cpu_record_clear (thread_type filter)
224{
225 thread_type *tmp = &filter;
226 hash_iterate (cpu_record,
227 (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
228 tmp);
229}
230
/* CLI: "clear thread cpu [FILTER]" — discard recorded CPU statistics.
 * FILTER uses the same rwtexb letters as "show thread cpu"; with no
 * argument, statistics for every thread type are cleared. */
DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
  int idx_filter = 3;
  int i = 0;
  /* Default: clear everything. */
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      /* Translate each filter letter into its thread-type bit. */
      while (argv[idx_filter]->arg[i] != '\0')
	{
	  switch ( argv[idx_filter]->arg[i] )
	    {
	    case 'r':
	    case 'R':
	      filter |= (1 << THREAD_READ);
	      break;
	    case 'w':
	    case 'W':
	      filter |= (1 << THREAD_WRITE);
	      break;
	    case 't':
	    case 'T':
	      filter |= (1 << THREAD_TIMER);
	      break;
	    case 'e':
	    case 'E':
	      filter |= (1 << THREAD_EVENT);
	      break;
	    case 'x':
	    case 'X':
	      filter |= (1 << THREAD_EXECUTE);
	      break;
	    case 'b':
	    case 'B':
	      filter |= (1 << THREAD_BACKGROUND);
	      break;
	    default:
	      /* Unknown letters are silently ignored. */
	      break;
	    }
	  ++i;
	}
      if (filter == 0)
	{
	  vty_out(vty, "Invalid filter \"%s\" specified,"
		  " must contain at least one of 'RWTEXB'%s",
		  argv[idx_filter]->arg, VTY_NEWLINE);
	  return CMD_WARNING;
	}
    }

  cpu_record_clear (filter);
  return CMD_SUCCESS;
}
6b0655a2 291
/* Register the thread CPU-statistics CLI commands. */
void
thread_cmd_init (void)
{
  install_element (VIEW_NODE, &show_thread_cpu_cmd);
  install_element (ENABLE_NODE, &clear_thread_cpu_cmd);
}
298
4becea72
CF
299static int
300thread_timer_cmp(void *a, void *b)
301{
302 struct thread *thread_a = a;
303 struct thread *thread_b = b;
304
d9d5c3e8 305 if (timercmp (&thread_a->u.sands, &thread_b->u.sands, <))
4becea72 306 return -1;
d9d5c3e8 307 if (timercmp (&thread_a->u.sands, &thread_b->u.sands, >))
4becea72
CF
308 return 1;
309 return 0;
310}
311
/* pqueue position callback: remember where this thread sits in the
 * heap array so thread_cancel() can remove it in O(log n). */
static void
thread_timer_update(void *node, int actual_position)
{
  struct thread *thread = node;

  thread->index = actual_position;
}
319
718e3744 320/* Allocate new thread master. */
321struct thread_master *
0a95a0d0 322thread_master_create (void)
718e3744 323{
4becea72 324 struct thread_master *rv;
308d14ae
DV
325 struct rlimit limit;
326
327 getrlimit(RLIMIT_NOFILE, &limit);
4becea72 328
e04ab74d 329 if (cpu_record == NULL)
8cc4198f 330 cpu_record
90645f55
SH
331 = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
332 (int (*) (const void *, const void *))cpu_record_hash_cmp);
4becea72
CF
333
334 rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
308d14ae
DV
335 if (rv == NULL)
336 {
337 return NULL;
338 }
339
340 rv->fd_limit = (int)limit.rlim_cur;
341 rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
342 if (rv->read == NULL)
343 {
344 XFREE (MTYPE_THREAD_MASTER, rv);
345 return NULL;
346 }
347
348 rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
349 if (rv->write == NULL)
350 {
351 XFREE (MTYPE_THREAD, rv->read);
352 XFREE (MTYPE_THREAD_MASTER, rv);
353 return NULL;
354 }
4becea72
CF
355
356 /* Initialize the timer queues */
357 rv->timer = pqueue_create();
358 rv->background = pqueue_create();
359 rv->timer->cmp = rv->background->cmp = thread_timer_cmp;
360 rv->timer->update = rv->background->update = thread_timer_update;
361
0a95a0d0 362#if defined(HAVE_POLL)
b53e10a1 363 rv->handler.pfdsize = rv->fd_limit;
0a95a0d0 364 rv->handler.pfdcount = 0;
f0d975f7
DS
365 rv->handler.pfds = XCALLOC (MTYPE_THREAD_MASTER,
366 sizeof (struct pollfd) * rv->handler.pfdsize);
0a95a0d0 367#endif
4becea72 368 return rv;
718e3744 369}
370
371/* Add a new thread to the list. */
372static void
373thread_list_add (struct thread_list *list, struct thread *thread)
374{
375 thread->next = NULL;
376 thread->prev = list->tail;
377 if (list->tail)
378 list->tail->next = thread;
379 else
380 list->head = thread;
381 list->tail = thread;
382 list->count++;
383}
384
718e3744 385/* Delete a thread from the list. */
386static struct thread *
387thread_list_delete (struct thread_list *list, struct thread *thread)
388{
389 if (thread->next)
390 thread->next->prev = thread->prev;
391 else
392 list->tail = thread->prev;
393 if (thread->prev)
394 thread->prev->next = thread->next;
395 else
396 list->head = thread->next;
397 thread->next = thread->prev = NULL;
398 list->count--;
399 return thread;
400}
401
/* Clear the per-fd slot for 'thread' in an fd-indexed thread array. */
static void
thread_delete_fd (struct thread **thread_array, struct thread *thread)
{
  thread_array[thread->u.fd] = NULL;
}
407
/* Store 'thread' in the per-fd slot of an fd-indexed thread array. */
static void
thread_add_fd (struct thread **thread_array, struct thread *thread)
{
  thread_array[thread->u.fd] = thread;
}
413
495f0b13
DS
414/* Thread list is empty or not. */
415static int
416thread_empty (struct thread_list *list)
417{
418 return list->head ? 0 : 1;
419}
420
421/* Delete top of the list and return it. */
422static struct thread *
423thread_trim_head (struct thread_list *list)
424{
425 if (!thread_empty (list))
426 return thread_list_delete (list, list->head);
427 return NULL;
428}
429
/* Move a finished thread onto the master's unuse list for reuse.
 * The thread must already be unlinked from whatever list/queue/array
 * it was on (next/prev are asserted NULL). */
static void
thread_add_unuse (struct thread_master *m, struct thread *thread)
{
  assert (m != NULL && thread != NULL);
  assert (thread->next == NULL);
  assert (thread->prev == NULL);

  thread->type = THREAD_UNUSED;
  /* The stats record counts currently-scheduled threads; this one no
   * longer is.  Assumes thread->hist was set by thread_get(). */
  thread->hist->total_active--;
  thread_list_add (&m->unuse, thread);
}
442
443/* Free all unused thread. */
444static void
445thread_list_free (struct thread_master *m, struct thread_list *list)
446{
447 struct thread *t;
448 struct thread *next;
449
450 for (t = list->head; t; t = next)
451 {
452 next = t->next;
453 XFREE (MTYPE_THREAD, t);
454 list->count--;
455 m->alloc--;
456 }
457}
458
/* Free every thread stored in an fd-indexed array, then the array
 * itself, keeping the master's allocation count accurate. */
static void
thread_array_free (struct thread_master *m, struct thread **thread_array)
{
  struct thread *t;
  int index;

  for (index = 0; index < m->fd_limit; ++index)
    {
      t = thread_array[index];
      if (t)
	{
	  thread_array[index] = NULL;
	  XFREE (MTYPE_THREAD, t);
	  m->alloc--;
	}
    }
  XFREE (MTYPE_THREAD, thread_array);
}
477
/* Free every thread held in a priority queue, then the queue itself. */
static void
thread_queue_free (struct thread_master *m, struct pqueue *queue)
{
  int i;

  for (i = 0; i < queue->size; i++)
    XFREE(MTYPE_THREAD, queue->array[i]);

  m->alloc -= queue->size;
  pqueue_delete(queue);
}
489
495f0b13
DS
490/*
491 * thread_master_free_unused
492 *
493 * As threads are finished with they are put on the
494 * unuse list for later reuse.
495 * If we are shutting down, Free up unused threads
496 * So we can see if we forget to shut anything off
497 */
498void
499thread_master_free_unused (struct thread_master *m)
500{
501 struct thread *t;
502 while ((t = thread_trim_head(&m->unuse)) != NULL)
503 {
504 XFREE(MTYPE_THREAD, t);
505 }
506}
507
/* Stop the thread scheduler: free every pending/ready/unused thread,
 * both timer queues, the poll fd array, the master itself, and finally
 * the (global) CPU statistics hash. */
void
thread_master_free (struct thread_master *m)
{
  thread_array_free (m, m->read);
  thread_array_free (m, m->write);
  thread_queue_free (m, m->timer);
  thread_list_free (m, &m->event);
  thread_list_free (m, &m->ready);
  thread_list_free (m, &m->unuse);
  thread_queue_free (m, m->background);

#if defined(HAVE_POLL)
  XFREE (MTYPE_THREAD_MASTER, m->handler.pfds);
#endif
  XFREE (MTYPE_THREAD_MASTER, m);

  /* cpu_record is shared by all masters; NULL it so a later
   * thread_master_create() can recreate it. */
  if (cpu_record)
    {
      hash_clean (cpu_record, cpu_record_hash_free);
      hash_free (cpu_record);
      cpu_record = NULL;
    }
}
532
/* Return the remaining time before 'thread' fires, in whole seconds;
 * 0 if the timer is already due or overdue. */
unsigned long
thread_timer_remain_second (struct thread *thread)
{
  int64_t remain = monotime_until(&thread->u.sands, NULL) / 1000000LL;
  return remain < 0 ? 0 : remain;
}
540
9c7753e4
DL
541#define debugargdef const char *funcname, const char *schedfrom, int fromln
542#define debugargpass funcname, schedfrom, fromln
e04ab74d 543
/* Return the remaining time before 'thread' fires as a timeval.
 * NOTE(review): unlike thread_timer_remain_second(), a negative
 * (overdue) remainder is returned as-is — confirm callers expect that. */
struct timeval
thread_timer_remain(struct thread *thread)
{
  struct timeval remain;
  monotime_until(&thread->u.sands, &remain);
  return remain;
}
551
/* Obtain a thread structure, recycling one from the unuse list when
 * possible, and initialize it for scheduling.  Also attaches the
 * CPU-statistics record for (func, funcname). */
static struct thread *
thread_get (struct thread_master *m, u_char type,
	    int (*func) (struct thread *), void *arg, debugargdef)
{
  struct thread *thread = thread_trim_head (&m->unuse);
  struct cpu_thread_history tmp;

  if (! thread)
    {
      thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
      m->alloc++;
    }
  thread->type = type;
  thread->add_type = type;
  thread->master = m;
  thread->arg = arg;
  thread->index = -1;
  thread->yield = THREAD_YIELD_TIME_SLOT; /* default */

  /*
   * If the passed-in funcname/func differ from what the recycled
   * thread last held, its cached ->hist is stale and must be looked
   * up again.  The stale values are deliberately kept on unused
   * threads on the assumption that the same kind of thread is likely
   * to be allocated next, saving hash_get() lookups.  (A freshly
   * XCALLOC'd thread has func/funcname NULL, so it always takes this
   * branch.)
   */
  if (thread->funcname != funcname ||
      thread->func != func)
    {
      tmp.func = func;
      tmp.funcname = funcname;
      thread->hist = hash_get (cpu_record, &tmp,
			       (void * (*) (void *))cpu_record_hash_alloc);
    }
  thread->hist->total_active++;
  thread->func = func;
  thread->funcname = funcname;
  thread->schedfrom = schedfrom;
  thread->schedfrom_line = fromln;

  return thread;
}
598
#if defined (HAVE_POLL)

#define fd_copy_fd_set(X) (X)

/* Schedule a read or write thread on 'fd' using the poll(2) backend.
 * Reuses an existing pollfd slot for 'fd' when present (OR-ing in the
 * new event bits), otherwise appends a new slot. */
static struct thread *
generic_thread_add(struct thread_master *m, int (*func) (struct thread *),
		   void *arg, int fd, int dir, debugargdef)
{
  struct thread *thread;

  u_char type;
  short int event;

  if (dir == THREAD_READ)
    {
      event = (POLLIN | POLLHUP);
      type = THREAD_READ;
    }
  else
    {
      event = (POLLOUT | POLLHUP);
      type = THREAD_WRITE;
    }

  /* Find an existing slot for this fd; default to appending. */
  nfds_t queuepos = m->handler.pfdcount;
  nfds_t i=0;
  for (i=0; i<m->handler.pfdcount; i++)
    if (m->handler.pfds[i].fd == fd)
      {
        queuepos = i;
        break;
      }

  /* is there enough space for a new fd? */
  assert (queuepos < m->handler.pfdsize);

  thread = thread_get (m, type, func, arg, debugargpass);
  m->handler.pfds[queuepos].fd = fd;
  m->handler.pfds[queuepos].events |= event;
  if (queuepos == m->handler.pfdcount)
    m->handler.pfdcount++;

  return thread;
}
#else

#define fd_copy_fd_set(X) (X)
#endif
cc7165b6 648
/* Backend-neutral wait for fd events: poll(2) when HAVE_POLL,
 * otherwise select(2).  Returns the number of ready descriptors (or
 * the raw poll/select result on error). */
static int
fd_select (struct thread_master *m, int size, thread_fd_set *read, thread_fd_set *write, thread_fd_set *except, struct timeval *timer_wait)
{
  int num;
#if defined(HAVE_POLL)
  /* recalc timeout for poll. Attention NULL pointer is no timeout with
     select, where with poll no timeout is -1 */
  int timeout = -1;
  if (timer_wait != NULL)
    timeout = (timer_wait->tv_sec*1000) + (timer_wait->tv_usec/1000);

  /* NOTE(review): pfdcountsnmp extends the pollfd array with
   * externally-managed (SNMP) fds — confirm against the handler
   * definition in thread.h. */
  num = poll (m->handler.pfds, m->handler.pfdcount + m->handler.pfdcountsnmp, timeout);
#else
  num = select (size, read, write, except, timer_wait);
#endif

  return num;
}
667
/* Did 'thread''s fd trigger?  Under poll the caller has already
 * checked revents, so this is unconditionally true ('fdset' and 'pos'
 * are unused); under select, consult the fd_set. */
static int
fd_is_set (struct thread *thread, thread_fd_set *fdset, int pos)
{
#if defined(HAVE_POLL)
  return 1;
#else
  return FD_ISSET (THREAD_FD (thread), fdset);
#endif
}
677
/* Remove 'thread''s fd from the master's select() fd_set.  No-op under
 * poll (pollfd bookkeeping is handled by the callers).  Returns 0 only
 * when the fd was not set in the select backend. */
static int
fd_clear_read_write (struct thread *thread)
{
#if !defined(HAVE_POLL)
  thread_fd_set *fdset = NULL;
  int fd = THREAD_FD (thread);

  if (thread->type == THREAD_READ)
    fdset = &thread->master->handler.readfd;
  else
    fdset = &thread->master->handler.writefd;

  if (!FD_ISSET (fd, fdset))
    return 0;

  FD_CLR (fd, fdset);
#endif
  return 1;
}
697
/* Schedule a new read or write thread on 'fd' ('dir' selects which).
 * Returns NULL if an fd watcher of that direction already exists
 * (select backend).  Backs the THREAD_READ/WRITE scheduling macros. */
struct thread *
funcname_thread_add_read_write (int dir, struct thread_master *m,
				int (*func) (struct thread *), void *arg, int fd,
				debugargdef)
{
  struct thread *thread = NULL;

#if !defined(HAVE_POLL)
  thread_fd_set *fdset = NULL;
  if (dir == THREAD_READ)
    fdset = &m->handler.readfd;
  else
    fdset = &m->handler.writefd;
#endif

#if defined (HAVE_POLL)
  thread = generic_thread_add(m, func, arg, fd, dir, debugargpass);

  if (thread == NULL)
    return NULL;
#else
  if (FD_ISSET (fd, fdset))
    {
      zlog_warn ("There is already %s fd [%d]",
		 (dir == THREAD_READ) ? "read" : "write", fd);
      return NULL;
    }

  FD_SET (fd, fdset);
  thread = thread_get (m, dir, func, arg, debugargpass);
#endif

  /* Record the thread in the fd-indexed lookup array. */
  thread->u.fd = fd;
  if (dir == THREAD_READ)
    thread_add_fd (m->read, thread);
  else
    thread_add_fd (m->write, thread);

  return thread;
}
739
/* Common implementation for timer and background scheduling: convert
 * the relative timeout to an absolute monotonic expiry and enqueue the
 * thread on the matching priority queue. */
static struct thread *
funcname_thread_add_timer_timeval (struct thread_master *m,
				   int (*func) (struct thread *),
				   int type,
				   void *arg,
				   struct timeval *time_relative,
				   debugargdef)
{
  struct thread *thread;
  struct pqueue *queue;

  assert (m != NULL);

  assert (type == THREAD_TIMER || type == THREAD_BACKGROUND);
  assert (time_relative);

  queue = ((type == THREAD_TIMER) ? m->timer : m->background);
  thread = thread_get (m, type, func, arg, debugargpass);

  /* u.sands = now + time_relative (monotonic clock). */
  monotime(&thread->u.sands);
  timeradd(&thread->u.sands, time_relative, &thread->u.sands);

  pqueue_enqueue(thread, queue);
  return thread;
}
765
98c91ac6 766
767/* Add timer event thread. */
9e867fe6 768struct thread *
98c91ac6 769funcname_thread_add_timer (struct thread_master *m,
770 int (*func) (struct thread *),
9c7753e4
DL
771 void *arg, long timer,
772 debugargdef)
9e867fe6 773{
98c91ac6 774 struct timeval trel;
9e867fe6 775
776 assert (m != NULL);
777
9076fbd3 778 trel.tv_sec = timer;
98c91ac6 779 trel.tv_usec = 0;
9e867fe6 780
a48b4e6d 781 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg,
9c7753e4 782 &trel, debugargpass);
98c91ac6 783}
9e867fe6 784
98c91ac6 785/* Add timer event thread with "millisecond" resolution */
786struct thread *
787funcname_thread_add_timer_msec (struct thread_master *m,
788 int (*func) (struct thread *),
9c7753e4
DL
789 void *arg, long timer,
790 debugargdef)
98c91ac6 791{
792 struct timeval trel;
9e867fe6 793
98c91ac6 794 assert (m != NULL);
718e3744 795
af04bd7c 796 trel.tv_sec = timer / 1000;
797 trel.tv_usec = 1000*(timer % 1000);
98c91ac6 798
a48b4e6d 799 return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
9c7753e4 800 arg, &trel, debugargpass);
a48b4e6d 801}
802
/* Schedule 'func' to run after the relative timeval 'tv'. */
struct thread *
funcname_thread_add_timer_tv (struct thread_master *m,
			      int (*func) (struct thread *),
			      void *arg, struct timeval *tv,
			      debugargdef)
{
  return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER,
					    arg, tv, debugargpass);
}
813
a48b4e6d 814/* Add a background thread, with an optional millisec delay */
815struct thread *
816funcname_thread_add_background (struct thread_master *m,
817 int (*func) (struct thread *),
9c7753e4
DL
818 void *arg, long delay,
819 debugargdef)
a48b4e6d 820{
821 struct timeval trel;
822
823 assert (m != NULL);
824
825 if (delay)
826 {
827 trel.tv_sec = delay / 1000;
828 trel.tv_usec = 1000*(delay % 1000);
829 }
830 else
831 {
832 trel.tv_sec = 0;
833 trel.tv_usec = 0;
834 }
835
836 return funcname_thread_add_timer_timeval (m, func, THREAD_BACKGROUND,
9c7753e4 837 arg, &trel, debugargpass);
718e3744 838}
839
/* Schedule a simple event thread carrying integer value 'val'; it is
 * appended to the master's event list and runs at the next cycle. */
struct thread *
funcname_thread_add_event (struct thread_master *m,
			   int (*func) (struct thread *), void *arg, int val,
			   debugargdef)
{
  struct thread *thread;

  assert (m != NULL);

  thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
  thread->u.val = val;
  thread_list_add (&m->event, thread);

  return thread;
}
856
/* Remove 'thread''s interest in 'state' (POLLIN/POLLOUT bits) from the
 * poll fd array; if no events remain for the fd the slot is deleted.
 * Under select, just clears the fd from the fd_set. */
static void
thread_cancel_read_or_write (struct thread *thread, short int state)
{
#if defined(HAVE_POLL)
  nfds_t i;

  for (i=0;i<thread->master->handler.pfdcount;++i)
    if (thread->master->handler.pfds[i].fd == thread->u.fd)
      {
        thread->master->handler.pfds[i].events &= ~(state);

        /* remove thread fds from pfd list */
        if (thread->master->handler.pfds[i].events == 0)
          {
            /* NOTE(review): the memmove length is based on pfdsize
             * (allocated capacity) rather than pfdcount (live
             * entries) — stays in bounds but copies more than
             * needed; confirm intent. */
            memmove(thread->master->handler.pfds+i,
                    thread->master->handler.pfds+i+1,
                    (thread->master->handler.pfdsize-i-1) * sizeof(struct pollfd));
            thread->master->handler.pfdcount--;
            return;
          }
      }
#endif

  fd_clear_read_write (thread);
}
882
/* Cancel a scheduled thread and return it to the unuse list.  The
 * storage it is removed from depends on its type: fd array for
 * read/write, pqueue for timer/background, list for event/ready. */
void
thread_cancel (struct thread *thread)
{
  struct thread_list *list = NULL;
  struct pqueue *queue = NULL;
  struct thread **thread_array = NULL;

  switch (thread->type)
    {
    case THREAD_READ:
#if defined (HAVE_POLL)
      thread_cancel_read_or_write (thread, POLLIN | POLLHUP);
#else
      thread_cancel_read_or_write (thread, 0);
#endif
      thread_array = thread->master->read;
      break;
    case THREAD_WRITE:
#if defined (HAVE_POLL)
      thread_cancel_read_or_write (thread, POLLOUT | POLLHUP);
#else
      thread_cancel_read_or_write (thread, 0);
#endif
      thread_array = thread->master->write;
      break;
    case THREAD_TIMER:
      queue = thread->master->timer;
      break;
    case THREAD_EVENT:
      list = &thread->master->event;
      break;
    case THREAD_READY:
      list = &thread->master->ready;
      break;
    case THREAD_BACKGROUND:
      queue = thread->master->background;
      break;
    default:
      /* THREAD_UNUSED/THREAD_EXECUTE are never stored anywhere. */
      return;
      break;
    }

  if (queue)
    {
      /* thread->index is maintained by thread_timer_update(). */
      assert(thread->index >= 0);
      assert(thread == queue->array[thread->index]);
      pqueue_remove_at(thread->index, queue);
    }
  else if (list)
    {
      thread_list_delete (list, thread);
    }
  else if (thread_array)
    {
      thread_delete_fd (thread_array, thread);
    }
  else
    {
      assert(!"Thread should be either in queue or list or array!");
    }

  thread_add_unuse (thread->master, thread);
}
947
948/* Delete all events which has argument value arg. */
dc81807a 949unsigned int
718e3744 950thread_cancel_event (struct thread_master *m, void *arg)
951{
dc81807a 952 unsigned int ret = 0;
718e3744 953 struct thread *thread;
954
955 thread = m->event.head;
956 while (thread)
957 {
958 struct thread *t;
959
960 t = thread;
961 thread = t->next;
962
963 if (t->arg == arg)
a48b4e6d 964 {
dc81807a 965 ret++;
a48b4e6d 966 thread_list_delete (&m->event, t);
a48b4e6d 967 thread_add_unuse (m, t);
968 }
718e3744 969 }
1b79fcb6
JBD
970
971 /* thread can be on the ready list too */
972 thread = m->ready.head;
973 while (thread)
974 {
975 struct thread *t;
976
977 t = thread;
978 thread = t->next;
979
980 if (t->arg == arg)
981 {
982 ret++;
983 thread_list_delete (&m->ready, t);
1b79fcb6
JBD
984 thread_add_unuse (m, t);
985 }
986 }
dc81807a 987 return ret;
718e3744 988}
989
/* Compute how long to block waiting for the earliest timer in 'queue'.
 * Fills *timer_val and returns it, or returns NULL when the queue is
 * empty (block indefinitely). */
static struct timeval *
thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
{
  if (queue->size)
    {
      struct thread *next_timer = queue->array[0];
      monotime_until(&next_timer->u.sands, timer_val);
      return timer_val;
    }
  return NULL;
}
718e3744 1001
/* Hand a thread to the caller: copy it into the caller-owned 'fetch'
 * and recycle the original onto the unuse list. */
static struct thread *
thread_run (struct thread_master *m, struct thread *thread,
	    struct thread *fetch)
{
  *fetch = *thread;
  thread_add_unuse (m, thread);
  return fetch;
}
1010
/* If 'thread''s fd became ready, move it from the fd array to the
 * ready list and clear its event interest.  Returns 1 when the thread
 * was made ready, 0 otherwise.  'fdset'/'pos'/'state' are only
 * meaningful for one of the two backends (see fd_is_set). */
static int
thread_process_fds_helper (struct thread_master *m, struct thread *thread, thread_fd_set *fdset, short int state, int pos)
{
  struct thread **thread_array;

  if (!thread)
    return 0;

  if (thread->type == THREAD_READ)
    thread_array = m->read;
  else
    thread_array = m->write;

  if (fd_is_set (thread, fdset, pos))
    {
      fd_clear_read_write (thread);
      thread_delete_fd (thread_array, thread);
      thread_list_add (&m->ready, thread);
      thread->type = THREAD_READY;
#if defined(HAVE_POLL)
      /* Stop polling for this event until rescheduled. */
      thread->master->handler.pfds[pos].events &= ~(state);
#endif
      return 1;
    }
  return 0;
}
1037
#if defined(HAVE_POLL)

/* Walk the pollfd array after poll() returned 'num' ready fds: promote
 * threads whose fds fired to the ready list, and drop array slots that
 * reported POLLNVAL/POLLHUP.  'readfd' is unused in the poll backend. */
static void
check_pollfds(struct thread_master *m, fd_set *readfd, int num)
{
  nfds_t i = 0;
  int ready = 0;
  /* Stop early once all 'num' reported fds have been seen. */
  for (i = 0; i < m->handler.pfdcount && ready < num ; ++i)
    {
      /* no event for current fd? immediately continue */
      if(m->handler.pfds[i].revents == 0)
        continue;

      ready++;

      /* POLLIN / POLLOUT process event */
      if (m->handler.pfds[i].revents & POLLIN)
        thread_process_fds_helper(m, m->read[m->handler.pfds[i].fd], NULL, POLLIN, i);
      if (m->handler.pfds[i].revents & POLLOUT)
        thread_process_fds_helper(m, m->write[m->handler.pfds[i].fd], NULL, POLLOUT, i);

      /* remove fd from list on POLLNVAL */
      if (m->handler.pfds[i].revents & POLLNVAL ||
          m->handler.pfds[i].revents & POLLHUP)
        {
          /* Compact the array over the dead slot and re-examine the
           * entry that was shifted into position i. */
          memmove(m->handler.pfds+i,
                  m->handler.pfds+i+1,
                  (m->handler.pfdsize-i-1) * sizeof(struct pollfd));
          m->handler.pfdcount--;
          i--;
        }
      else
        m->handler.pfds[i].revents = 0;
    }
}
#endif
1075
/* Dispatch fd readiness after the wait: poll backend scans the pollfd
 * array; select backend scans the fd-indexed read/write thread arrays
 * against the returned fd_sets ('num' bounds the scan). */
static void
thread_process_fds (struct thread_master *m, thread_fd_set *rset, thread_fd_set *wset, int num)
{
#if defined (HAVE_POLL)
  check_pollfds (m, rset, num);
#else
  int ready = 0, index;

  for (index = 0; index < m->fd_limit && ready < num; ++index)
    {
      ready += thread_process_fds_helper (m, m->read[index], rset, 0, 0);
      ready += thread_process_fds_helper (m, m->write[index], wset, 0, 0);
    }
#endif
}
1091
/* Add all timers that have popped to the ready list.  The queue is a
 * min-heap on expiry time, so processing stops at the first timer
 * still in the future.  Returns the number made ready. */
static unsigned int
thread_timer_process (struct pqueue *queue, struct timeval *timenow)
{
  struct thread *thread;
  unsigned int ready = 0;

  while (queue->size)
    {
      thread = queue->array[0];
      if (timercmp (timenow, &thread->u.sands, <))
	return ready;
      pqueue_dequeue(queue);
      thread->type = THREAD_READY;
      thread_list_add (&thread->master->ready, thread);
      ready++;
    }
  return ready;
}
1111
2613abe6
PJ
1112/* process a list en masse, e.g. for event thread lists */
1113static unsigned int
1114thread_process (struct thread_list *list)
1115{
1116 struct thread *thread;
b5043aab 1117 struct thread *next;
2613abe6
PJ
1118 unsigned int ready = 0;
1119
b5043aab 1120 for (thread = list->head; thread; thread = next)
2613abe6 1121 {
b5043aab 1122 next = thread->next;
2613abe6
PJ
1123 thread_list_delete (list, thread);
1124 thread->type = THREAD_READY;
1125 thread_list_add (&thread->master->ready, thread);
1126 ready++;
1127 }
1128 return ready;
1129}
1130
1131
/* Scheduler core: loop until some thread becomes ready, then return it
 * (copied into *fetch by thread_run()) for the caller to execute via
 * thread_call().  Within one pass the priority order is: already-ready
 * threads, events, foreground timers, I/O, background timers.
 * Returns NULL only on an unrecoverable fd_select() error.
 */
718e3744 1132/* Fetch next ready thread. */
1133struct thread *
1134thread_fetch (struct thread_master *m, struct thread *fetch)
1135{
718e3744 1136 struct thread *thread;
209a72a6
DS
1137 thread_fd_set readfd;
1138 thread_fd_set writefd;
1139 thread_fd_set exceptfd;
1140 struct timeval now;
4b185cb3
2613abe6 1141 struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
a48b4e6d 1142 struct timeval timer_val_bg;
2613abe6 1143 struct timeval *timer_wait = &timer_val;
a48b4e6d 1144 struct timeval *timer_wait_bg;
718e3744 1145
1146 while (1)
1147 {
a48b4e6d 1148 int num = 0;
56e2c5e8 1149
2613abe6 1150 /* Signals pre-empt everything */
05c447dd 1151 quagga_sigevent_process ();
1152
2613abe6
PJ
1153 /* Drain the ready queue of already scheduled jobs, before scheduling
1154 * more.
a48b4e6d 1155 */
718e3744 1156 if ((thread = thread_trim_head (&m->ready)) != NULL)
05c447dd 1157 return thread_run (m, thread, fetch);
a48b4e6d 1158
2613abe6
PJ
1159 /* To be fair to all kinds of threads, and avoid starvation, we
1160 * need to be careful to consider all thread types for scheduling
1161 * in each quanta. I.e. we should not return early from here on.
1162 */
1163
1164 /* Normal event are the next highest priority. */
1165 thread_process (&m->event);
1166
      /* select() mutates its fd sets, so work on copies of the master
         sets each iteration (poll builds keep state in the handler). */
718e3744 1167 /* Structure copy. */
0a95a0d0
DS
1168#if !defined(HAVE_POLL)
1169 readfd = fd_copy_fd_set(m->handler.readfd);
1170 writefd = fd_copy_fd_set(m->handler.writefd);
1171 exceptfd = fd_copy_fd_set(m->handler.exceptfd);
1172#endif
a48b4e6d 1173
      /* Block no longer than the nearest foreground/background timer
         deadline; if events were queued above, ready.count != 0 and we
         keep the zeroed timer_val so the wait does not block at all. */
1174 /* Calculate select wait timer if nothing else to do */
2613abe6
PJ
1175 if (m->ready.count == 0)
1176 {
4becea72
CF
1177 timer_wait = thread_timer_wait (m->timer, &timer_val);
1178 timer_wait_bg = thread_timer_wait (m->background, &timer_val_bg);
2613abe6
PJ
1179
1180 if (timer_wait_bg &&
d9d5c3e8 1181 (!timer_wait || (timercmp (timer_wait, timer_wait_bg, >))))
2613abe6
PJ
1182 timer_wait = timer_wait_bg;
1183 }
56e2c5e8 1184
      /* A negative wait means a timer already popped; clamp to zero so
         the wait below polls instead of blocking. */
e0e2a990
DL
1185 if (timer_wait && timer_wait->tv_sec < 0)
1186 {
1187 timerclear(&timer_val);
1188 timer_wait = &timer_val;
1189 }
1190
0a95a0d0 1191 num = fd_select (m, FD_SETSIZE, &readfd, &writefd, &exceptfd, timer_wait);
a48b4e6d 1192
1193 /* Signals should get quick treatment */
718e3744 1194 if (num < 0)
05c447dd 1195 {
1196 if (errno == EINTR)
a48b4e6d 1197 continue; /* signal received - process it */
6099b3b5 1198 zlog_warn ("select() error: %s", safe_strerror (errno));
5d4ccd4e 1199 return NULL;
05c447dd 1200 }
8b70d0b0 1201
1202 /* Check foreground timers. Historically, they have had higher
1203 priority than I/O threads, so let's push them onto the ready
1204 list in front of the I/O threads. */
4b185cb3
DL
1205 monotime(&now);
1206 thread_timer_process (m->timer, &now);
a48b4e6d 1207
1208 /* Got IO, process it */
1209 if (num > 0)
5d4ccd4e 1210 thread_process_fds (m, &readfd, &writefd, num);
8b70d0b0 1211
1212#if 0
1213 /* If any threads were made ready above (I/O or foreground timer),
1214 perhaps we should avoid adding background timers to the ready
1215 list at this time. If this is code is uncommented, then background
1216 timer threads will not run unless there is nothing else to do. */
1217 if ((thread = thread_trim_head (&m->ready)) != NULL)
1218 return thread_run (m, thread, fetch);
1219#endif
1220
a48b4e6d 1221 /* Background timer/events, lowest priority */
4b185cb3 1222 thread_timer_process (m->background, &now);
a48b4e6d 1223
8b70d0b0 1224 if ((thread = thread_trim_head (&m->ready)) != NULL)
05c447dd 1225 return thread_run (m, thread, fetch);
718e3744 1226 }
1227}
1228
924b9229 1229unsigned long
8b70d0b0 1230thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
718e3744 1231{
718e3744 1232 /* This is 'user + sys' time. */
8b70d0b0 1233 *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
1234 timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
8b70d0b0 1235 return timeval_elapsed (now->real, start->real);
1236}
1237
50596be0
DS
1238/* We should aim to yield after yield milliseconds, which defaults
1239 to THREAD_YIELD_TIME_SLOT .
8b70d0b0 1240 Note: we are using real (wall clock) time for this calculation.
1241 It could be argued that CPU time may make more sense in certain
1242 contexts. The things to consider are whether the thread may have
1243 blocked (in which case wall time increases, but CPU time does not),
1244 or whether the system is heavily loaded with other processes competing
1245 for CPU time. On balance, wall clock time seems to make sense.
1246 Plus it has the added benefit that gettimeofday should be faster
1247 than calling getrusage. */
718e3744 1248int
1249thread_should_yield (struct thread *thread)
1250{
4b185cb3 1251 return monotime_since(&thread->real, NULL) > (int64_t)thread->yield;
50596be0
DS
1252}
1253
1254void
1255thread_set_yield_time (struct thread *thread, unsigned long yield_time)
1256{
1257 thread->yield = yield_time;
718e3744 1258}
1259
db9c0df9
PJ
1260void
1261thread_getrusage (RUSAGE_T *r)
1262{
4b185cb3 1263 monotime(&r->real);
db9c0df9 1264 getrusage(RUSAGE_SELF, &(r->cpu));
db9c0df9
PJ
1265}
1266
d1265948
DL
/* Thread whose callback is currently executing via thread_call();
   NULL whenever no thread callback is running. */
1267struct thread *thread_current = NULL;
1268
/* Run a thread's callback, bracketing it with rusage snapshots so the
 * per-function history (thread->hist) accumulates both wall-clock and
 * CPU time, call counts, and the set of add_types seen.  Emits a
 * warning for callbacks exceeding CONSUMED_TIME_CHECK wall time.
 */
718e3744 1269/* We check thread consumed time. If the system has getrusage, we'll
8b70d0b0 1270 use that to get in-depth stats on the performance of the thread in addition
1271 to wall clock time stats from gettimeofday. */
718e3744 1272void
1273thread_call (struct thread *thread)
1274{
8b70d0b0 1275 unsigned long realtime, cputime;
41af338e 1276 RUSAGE_T before, after;
cc8b13a0 1277
      /* Snapshot usage and record the start time consulted by
         thread_should_yield() during the callback. */
41af338e
JBD
1278 GETRUSAGE (&before);
1279 thread->real = before.real;
718e3744 1280
d1265948 1281 thread_current = thread;
718e3744 1282 (*thread->func) (thread);
d1265948 1283 thread_current = NULL;
718e3744 1284
41af338e 1285 GETRUSAGE (&after);
718e3744 1286
41af338e 1287 realtime = thread_consumed_time (&after, &before, &cputime);
cc8b13a0
PJ
1288 thread->hist->real.total += realtime;
1289 if (thread->hist->real.max < realtime)
1290 thread->hist->real.max = realtime;
cc8b13a0
PJ
1291 thread->hist->cpu.total += cputime;
1292 if (thread->hist->cpu.max < cputime)
1293 thread->hist->cpu.max = cputime;
e04ab74d 1294
cc8b13a0
PJ
1295 ++(thread->hist->total_calls);
1296 thread->hist->types |= (1 << thread->add_type);
718e3744 1297
924b9229 1298#ifdef CONSUMED_TIME_CHECK
8b70d0b0 1299 if (realtime > CONSUMED_TIME_CHECK)
718e3744 1300 {
1301 /*
1302 * We have a CPU Hog on our hands.
1303 * Whinge about it now, so we're aware this is yet another task
1304 * to fix.
1305 */
      /* NOTE(review): casting a function pointer to unsigned long for
         %lx is implementation-defined; works on the supported targets
         but is not strictly portable. */
8b70d0b0 1306 zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
924b9229 1307 thread->funcname,
1308 (unsigned long) thread->func,
8b70d0b0 1309 realtime/1000, cputime/1000);
718e3744 1310 }
924b9229 1311#endif /* CONSUMED_TIME_CHECK */
718e3744 1312}
1313
/* Build a temporary THREAD_EXECUTE thread on the stack and run 'func'
 * immediately through thread_call(), so the call shows up in the
 * per-function CPU statistics (cpu_record).  The dummy is never
 * queued on any master; always returns NULL.
 */
1314/* Execute thread */
1315struct thread *
e04ab74d 1316funcname_thread_execute (struct thread_master *m,
718e3744 1317 int (*func)(struct thread *),
1318 void *arg,
e04ab74d 1319 int val,
9c7753e4 1320 debugargdef)
718e3744 1321{
f7c62e11
DS
1322 struct cpu_thread_history tmp;
1323 struct thread dummy;
718e3744 1324
1325 memset (&dummy, 0, sizeof (struct thread));
1326
1327 dummy.type = THREAD_EVENT;
e04ab74d 1328 dummy.add_type = THREAD_EXECUTE;
718e3744 1329 dummy.master = NULL;
718e3744 1330 dummy.arg = arg;
1331 dummy.u.val = val;
9c7753e4 1332
      /* Only tmp.func/tmp.funcname are initialized before the lookup;
         NOTE(review): this assumes the cpu_record hash key/compare
         callbacks read nothing else from the key -- confirm against
         cpu_record_hash_key()/cmp(). */
f7c62e11
DS
1333 tmp.func = dummy.func = func;
1334 tmp.funcname = dummy.funcname = funcname;
1335 dummy.hist = hash_get (cpu_record, &tmp,
1336 (void * (*) (void *))cpu_record_hash_alloc);
1337
9c7753e4
DL
1338 dummy.schedfrom = schedfrom;
1339 dummy.schedfrom_line = fromln;
1340
718e3744 1341 thread_call (&dummy);
1342
1343 return NULL;
1344}