/* Thread management routine
 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* #define DEBUG */

#include <zebra.h>
#include <sys/resource.h>

#include "thread.h"
#include "memory.h"
#include "log.h"
#include "hash.h"
#include "pqueue.h"
#include "command.h"
#include "sigevent.h"
#include "network.h"

DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
DEFINE_MTYPE_STATIC(LIB, THREAD_MASTER, "Thread master")
DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")

#if defined(__APPLE__)
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif

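/* The scheduler pthread may be blocked in poll() inside fd_poll(). AWAKEN
 * pokes the self-pipe that fd_poll() always watches, forcing poll() to
 * return so newly scheduled work is picked up; fd_poll() drains and
 * discards the byte. */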
#define AWAKEN(m) \
  do { \
    static unsigned char wakebyte = 0x01; \
    write (m->io_pipe[1], &wakebyte, 1); \
  } while (0);

static pthread_mutex_t cpu_record_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct hash *cpu_record = NULL;

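/* Elapsed time from 'b' to 'a', in microseconds. */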
static unsigned long
timeval_elapsed (struct timeval a, struct timeval b)
{
  return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
          + (a.tv_usec - b.tv_usec));
}

static unsigned int
cpu_record_hash_key (struct cpu_thread_history *a)
{
  return (uintptr_t) a->func;
}

static int
cpu_record_hash_cmp (const struct cpu_thread_history *a,
                     const struct cpu_thread_history *b)
{
  return a->func == b->func;
}

static void *
cpu_record_hash_alloc (struct cpu_thread_history *a)
{
  struct cpu_thread_history *new;
  new = XCALLOC (MTYPE_THREAD_STATS, sizeof (struct cpu_thread_history));
  new->func = a->func;
  new->funcname = a->funcname;
  return new;
}

static void
cpu_record_hash_free (void *a)
{
  struct cpu_thread_history *hist = a;

  XFREE (MTYPE_THREAD_STATS, hist);
}

static void
vty_out_cpu_thread_history(struct vty* vty,
                           struct cpu_thread_history *a)
{
  vty_out(vty, "%5d %10ld.%03ld %9d %8ld %9ld %8ld %9ld",
          a->total_active, a->cpu.total/1000, a->cpu.total%1000, a->total_calls,
          a->cpu.total/a->total_calls, a->cpu.max,
          a->real.total/a->total_calls, a->real.max);
  vty_out(vty, " %c%c%c%c%c %s%s",
          a->types & (1 << THREAD_READ) ? 'R':' ',
          a->types & (1 << THREAD_WRITE) ? 'W':' ',
          a->types & (1 << THREAD_TIMER) ? 'T':' ',
          a->types & (1 << THREAD_EVENT) ? 'E':' ',
          a->types & (1 << THREAD_EXECUTE) ? 'X':' ',
          a->funcname, VTY_NEWLINE);
}

static void
cpu_record_hash_print(struct hash_backet *bucket,
                      void *args[])
{
  struct cpu_thread_history *totals = args[0];
  struct vty *vty = args[1];
  thread_type *filter = args[2];
  struct cpu_thread_history *a = bucket->data;

  if ( !(a->types & *filter) )
    return;
  vty_out_cpu_thread_history(vty,a);
  totals->total_active += a->total_active;
  totals->total_calls += a->total_calls;
  totals->real.total += a->real.total;
  if (totals->real.max < a->real.max)
    totals->real.max = a->real.max;
  totals->cpu.total += a->cpu.total;
  if (totals->cpu.max < a->cpu.max)
    totals->cpu.max = a->cpu.max;
}

static void
cpu_record_print(struct vty *vty, thread_type filter)
{
  struct cpu_thread_history tmp;
  void *args[3] = {&tmp, vty, &filter};

  memset(&tmp, 0, sizeof tmp);
  tmp.funcname = "TOTAL";
  tmp.types = filter;

  vty_out(vty, "%21s %18s %18s%s",
          "", "CPU (user+system):", "Real (wall-clock):", VTY_NEWLINE);
  vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
  vty_out(vty, " Avg uSec Max uSecs");
  vty_out(vty, " Type Thread%s", VTY_NEWLINE);

  pthread_mutex_lock (&cpu_record_mtx);
  {
    hash_iterate(cpu_record,
                 (void(*)(struct hash_backet*,void*))cpu_record_hash_print,
                 args);
  }
  pthread_mutex_unlock (&cpu_record_mtx);

  if (tmp.total_calls > 0)
    vty_out_cpu_thread_history(vty, &tmp);
}

DEFUN (show_thread_cpu,
       show_thread_cpu_cmd,
       "show thread cpu [FILTER]",
       SHOW_STR
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtex)\n")
{
  int idx_filter = 3;
  int i = 0;
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      while (argv[idx_filter]->arg[i] != '\0')
        {
          switch ( argv[idx_filter]->arg[i] )
            {
            case 'r':
            case 'R':
              filter |= (1 << THREAD_READ);
              break;
            case 'w':
            case 'W':
              filter |= (1 << THREAD_WRITE);
              break;
            case 't':
            case 'T':
              filter |= (1 << THREAD_TIMER);
              break;
            case 'e':
            case 'E':
              filter |= (1 << THREAD_EVENT);
              break;
            case 'x':
            case 'X':
              filter |= (1 << THREAD_EXECUTE);
              break;
            default:
              break;
            }
          ++i;
        }
      if (filter == 0)
        {
          vty_out(vty, "Invalid filter \"%s\" specified,"
                  " must contain at least one of 'RWTEX'%s",
                  argv[idx_filter]->arg, VTY_NEWLINE);
          return CMD_WARNING;
        }
    }

  cpu_record_print(vty, filter);
  return CMD_SUCCESS;
}

static void
cpu_record_hash_clear (struct hash_backet *bucket,
                       void *args)
{
  thread_type *filter = args;
  struct cpu_thread_history *a = bucket->data;

  if ( !(a->types & *filter) )
    return;

  /* Called from cpu_record_clear() below, which already holds
   * cpu_record_mtx; re-locking the (non-recursive) mutex here would
   * deadlock. */
  hash_release (cpu_record, bucket->data);
}

static void
cpu_record_clear (thread_type filter)
{
  thread_type *tmp = &filter;

  pthread_mutex_lock (&cpu_record_mtx);
  {
    hash_iterate (cpu_record,
                  (void (*) (struct hash_backet*,void*)) cpu_record_hash_clear,
                  tmp);
  }
  pthread_mutex_unlock (&cpu_record_mtx);
}

DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtex)\n")
{
  int idx_filter = 3;
  int i = 0;
  thread_type filter = (thread_type) -1U;

  if (argc > 3)
    {
      filter = 0;
      while (argv[idx_filter]->arg[i] != '\0')
        {
          switch ( argv[idx_filter]->arg[i] )
            {
            case 'r':
            case 'R':
              filter |= (1 << THREAD_READ);
              break;
            case 'w':
            case 'W':
              filter |= (1 << THREAD_WRITE);
              break;
            case 't':
            case 'T':
              filter |= (1 << THREAD_TIMER);
              break;
            case 'e':
            case 'E':
              filter |= (1 << THREAD_EVENT);
              break;
            case 'x':
            case 'X':
              filter |= (1 << THREAD_EXECUTE);
              break;
            default:
              break;
            }
          ++i;
        }
      if (filter == 0)
        {
          vty_out(vty, "Invalid filter \"%s\" specified,"
                  " must contain at least one of 'RWTEX'%s",
                  argv[idx_filter]->arg, VTY_NEWLINE);
          return CMD_WARNING;
        }
    }

  cpu_record_clear (filter);
  return CMD_SUCCESS;
}

void
thread_cmd_init (void)
{
  install_element (VIEW_NODE, &show_thread_cpu_cmd);
  install_element (ENABLE_NODE, &clear_thread_cpu_cmd);
}

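/* Timers are kept in a pqueue (binary heap) ordered by absolute deadline.
 * The 'update' callback below records each thread's current heap slot in
 * thread->index, which thread_cancel() relies on when it removes a timer
 * from the middle of the heap. */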
static int
thread_timer_cmp(void *a, void *b)
{
  struct thread *thread_a = a;
  struct thread *thread_b = b;

  if (timercmp (&thread_a->u.sands, &thread_b->u.sands, <))
    return -1;
  if (timercmp (&thread_a->u.sands, &thread_b->u.sands, >))
    return 1;
  return 0;
}

static void
thread_timer_update(void *node, int actual_position)
{
  struct thread *thread = node;

  thread->index = actual_position;
}

/* Allocate new thread master. */
struct thread_master *
thread_master_create (void)
{
  struct thread_master *rv;
  struct rlimit limit;

  getrlimit(RLIMIT_NOFILE, &limit);

  pthread_mutex_lock (&cpu_record_mtx);
  {
    if (cpu_record == NULL)
      cpu_record = hash_create ((unsigned int (*) (void *))cpu_record_hash_key,
                                (int (*) (const void *, const void *))
                                cpu_record_hash_cmp);
  }
  pthread_mutex_unlock (&cpu_record_mtx);

  rv = XCALLOC (MTYPE_THREAD_MASTER, sizeof (struct thread_master));
  if (rv == NULL)
    return NULL;

  pthread_mutex_init (&rv->mtx, NULL);

  rv->fd_limit = (int)limit.rlim_cur;
  rv->read = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
  if (rv->read == NULL)
    {
      XFREE (MTYPE_THREAD_MASTER, rv);
      return NULL;
    }

  rv->write = XCALLOC (MTYPE_THREAD, sizeof (struct thread *) * rv->fd_limit);
  if (rv->write == NULL)
    {
      XFREE (MTYPE_THREAD, rv->read);
      XFREE (MTYPE_THREAD_MASTER, rv);
      return NULL;
    }

  /* Initialize the timer queues */
  rv->timer = pqueue_create();
  rv->timer->cmp = thread_timer_cmp;
  rv->timer->update = thread_timer_update;
  rv->spin = true;
  rv->handle_signals = true;
  rv->owner = pthread_self();
  pipe (rv->io_pipe);
  set_nonblocking (rv->io_pipe[0]);
  set_nonblocking (rv->io_pipe[1]);

  rv->handler.pfdsize = rv->fd_limit;
  rv->handler.pfdcount = 0;
  rv->handler.pfds = XCALLOC (MTYPE_THREAD_MASTER,
                              sizeof (struct pollfd) * rv->handler.pfdsize);
  rv->handler.copy = XCALLOC (MTYPE_THREAD_MASTER,
                              sizeof (struct pollfd) * rv->handler.pfdsize);

  return rv;
}

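/* Sketch of the usual lifecycle, as seen from a daemon's main() (the
 * callback/context names here are hypothetical; thread_add_read() and
 * friends are the wrapper macros from thread.h):
 *
 *   struct thread_master *master = thread_master_create ();
 *   struct thread t;
 *
 *   thread_add_read (master, my_read_cb, my_ctx, my_fd, NULL);
 *   while (thread_fetch (master, &t))
 *     thread_call (&t);
 *
 *   thread_master_free (master);
 */
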
/* Add a new thread to the list. */
static void
thread_list_add (struct thread_list *list, struct thread *thread)
{
  thread->next = NULL;
  thread->prev = list->tail;
  if (list->tail)
    list->tail->next = thread;
  else
    list->head = thread;
  list->tail = thread;
  list->count++;
}

/* Delete a thread from the list. */
static struct thread *
thread_list_delete (struct thread_list *list, struct thread *thread)
{
  if (thread->next)
    thread->next->prev = thread->prev;
  else
    list->tail = thread->prev;
  if (thread->prev)
    thread->prev->next = thread->next;
  else
    list->head = thread->next;
  thread->next = thread->prev = NULL;
  list->count--;
  return thread;
}

/* Return 1 if the thread list is empty, 0 otherwise. */
static int
thread_empty (struct thread_list *list)
{
  return list->head ? 0 : 1;
}

/* Delete top of the list and return it. */
static struct thread *
thread_trim_head (struct thread_list *list)
{
  if (!thread_empty (list))
    return thread_list_delete (list, list->head);
  return NULL;
}

/* Move thread to unuse list. */
static void
thread_add_unuse (struct thread_master *m, struct thread *thread)
{
  assert (m != NULL && thread != NULL);
  assert (thread->next == NULL);
  assert (thread->prev == NULL);
  thread->ref = NULL;

  thread->type = THREAD_UNUSED;
  thread->hist->total_active--;
  thread_list_add (&m->unuse, thread);
}

/* Free all unused threads. */
static void
thread_list_free (struct thread_master *m, struct thread_list *list)
{
  struct thread *t;
  struct thread *next;

  for (t = list->head; t; t = next)
    {
      next = t->next;
      XFREE (MTYPE_THREAD, t);
      list->count--;
      m->alloc--;
    }
}

static void
thread_array_free (struct thread_master *m, struct thread **thread_array)
{
  struct thread *t;
  int index;

  for (index = 0; index < m->fd_limit; ++index)
    {
      t = thread_array[index];
      if (t)
        {
          thread_array[index] = NULL;
          XFREE (MTYPE_THREAD, t);
          m->alloc--;
        }
    }
  XFREE (MTYPE_THREAD, thread_array);
}

static void
thread_queue_free (struct thread_master *m, struct pqueue *queue)
{
  int i;

  for (i = 0; i < queue->size; i++)
    XFREE(MTYPE_THREAD, queue->array[i]);

  m->alloc -= queue->size;
  pqueue_delete(queue);
}

/*
 * thread_master_free_unused
 *
 * As threads are finished with, they are put on the unuse list for later
 * reuse. If we are shutting down, free up the unused threads so we can
 * see whether we forgot to shut anything off.
 */
void
thread_master_free_unused (struct thread_master *m)
{
  pthread_mutex_lock (&m->mtx);
  {
    struct thread *t;
    while ((t = thread_trim_head(&m->unuse)) != NULL)
      {
        pthread_mutex_destroy (&t->mtx);
        XFREE(MTYPE_THREAD, t);
      }
  }
  pthread_mutex_unlock (&m->mtx);
}

/* Stop thread scheduler. */
void
thread_master_free (struct thread_master *m)
{
  thread_array_free (m, m->read);
  thread_array_free (m, m->write);
  thread_queue_free (m, m->timer);
  thread_list_free (m, &m->event);
  thread_list_free (m, &m->ready);
  thread_list_free (m, &m->unuse);
  pthread_mutex_destroy (&m->mtx);
  close (m->io_pipe[0]);
  close (m->io_pipe[1]);

  XFREE (MTYPE_THREAD_MASTER, m->handler.pfds);
  XFREE (MTYPE_THREAD_MASTER, m->handler.copy);
  XFREE (MTYPE_THREAD_MASTER, m);

  pthread_mutex_lock (&cpu_record_mtx);
  {
    if (cpu_record)
      {
        hash_clean (cpu_record, cpu_record_hash_free);
        hash_free (cpu_record);
        cpu_record = NULL;
      }
  }
  pthread_mutex_unlock (&cpu_record_mtx);
}

/* Return remaining time in seconds. */
unsigned long
thread_timer_remain_second (struct thread *thread)
{
  int64_t remain;

  pthread_mutex_lock (&thread->mtx);
  {
    remain = monotime_until(&thread->u.sands, NULL) / 1000000LL;
  }
  pthread_mutex_unlock (&thread->mtx);

  return remain < 0 ? 0 : remain;
}

#define debugargdef  const char *funcname, const char *schedfrom, int fromln
#define debugargpass funcname, schedfrom, fromln

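/* Each funcname_thread_add_* entry point below takes these trailing debug
 * arguments. Callers normally use the thread_add_*() macros from thread.h,
 * which fill them in with the callback name and the scheduling call site
 * (file and line); that is what feeds "show thread cpu" and the
 * CONSUMED_TIME_CHECK warning in thread_call(). */
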
struct timeval
thread_timer_remain(struct thread *thread)
{
  struct timeval remain;
  pthread_mutex_lock (&thread->mtx);
  {
    monotime_until(&thread->u.sands, &remain);
  }
  pthread_mutex_unlock (&thread->mtx);
  return remain;
}

/* Get new thread. */
static struct thread *
thread_get (struct thread_master *m, u_char type,
            int (*func) (struct thread *), void *arg, debugargdef)
{
  struct thread *thread = thread_trim_head (&m->unuse);
  struct cpu_thread_history tmp;

  if (! thread)
    {
      thread = XCALLOC (MTYPE_THREAD, sizeof (struct thread));
      /* mutex only needs to be initialized at struct creation. */
      pthread_mutex_init (&thread->mtx, NULL);
      m->alloc++;
    }

  thread->type = type;
  thread->add_type = type;
  thread->master = m;
  thread->arg = arg;
  thread->index = -1;
  thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
  thread->ref = NULL;

  /*
   * So if the passed in funcname is not what we have
   * stored that means the thread->hist needs to be
   * updated. We keep the last one around in unused
   * under the assumption that we are probably
   * going to immediately allocate the same
   * type of thread.
   * This hopefully saves us some serious
   * hash_get lookups.
   */
  if (thread->funcname != funcname ||
      thread->func != func)
    {
      tmp.func = func;
      tmp.funcname = funcname;
      pthread_mutex_lock (&cpu_record_mtx);
      {
        thread->hist = hash_get (cpu_record, &tmp,
                                 (void * (*) (void *))cpu_record_hash_alloc);
      }
      pthread_mutex_unlock (&cpu_record_mtx);
    }
  thread->hist->total_active++;
  thread->func = func;
  thread->funcname = funcname;
  thread->schedfrom = schedfrom;
  thread->schedfrom_line = fromln;

  return thread;
}

static int
fd_poll (struct thread_master *m, struct pollfd *pfds, nfds_t pfdsize,
         nfds_t count, struct timeval *timer_wait)
{
  /* If timer_wait is null here, that means poll() should block indefinitely,
   * unless the thread_master has overridden it by setting ->selectpoll_timeout.
   * If the value is positive, it specifies the maximum number of milliseconds
   * to wait. If the value is negative, it specifies that we should never wait
   * and always return immediately even if no event is detected. If the value
   * is zero, the default behavior applies. */
  int timeout = -1;

  /* number of file descriptors with events */
  int num;

  if (timer_wait != NULL && m->selectpoll_timeout == 0) // use the default value
    timeout = (timer_wait->tv_sec*1000) + (timer_wait->tv_usec/1000);
  else if (m->selectpoll_timeout > 0) // use the user's timeout
    timeout = m->selectpoll_timeout;
  else if (m->selectpoll_timeout < 0) // effect a poll (return immediately)
    timeout = 0;

  /* add poll pipe poker */
  assert (count + 1 < pfdsize);
  pfds[count].fd = m->io_pipe[0];
  pfds[count].events = POLLIN;
  pfds[count].revents = 0x00;

  num = poll (pfds, count + 1, timeout);

  static unsigned char trash[64];
  if (num > 0 && pfds[count].revents != 0 && num--)
    while (read (m->io_pipe[0], &trash, sizeof (trash)) > 0);

  return num;
}

/* Add new read or write thread. */
struct thread *
funcname_thread_add_read_write (int dir, struct thread_master *m,
                                int (*func) (struct thread *), void *arg, int fd, struct thread **t_ptr,
                                debugargdef)
{
  struct thread *thread = NULL;

  pthread_mutex_lock (&m->mtx);
  {
    if (t_ptr && *t_ptr) // thread is already scheduled; don't reschedule
      {
        pthread_mutex_unlock (&m->mtx);
        return NULL;
      }

    /* default to a new pollfd */
    nfds_t queuepos = m->handler.pfdcount;

    /* if we already have a pollfd for our file descriptor, find and use it */
    for (nfds_t i = 0; i < m->handler.pfdcount; i++)
      if (m->handler.pfds[i].fd == fd)
        {
          queuepos = i;
          break;
        }

    /* make sure we have room for this fd + pipe poker fd */
    assert (queuepos + 1 < m->handler.pfdsize);

    thread = thread_get (m, dir, func, arg, debugargpass);

    m->handler.pfds[queuepos].fd = fd;
    m->handler.pfds[queuepos].events |= (dir == THREAD_READ ? POLLIN : POLLOUT);

    if (queuepos == m->handler.pfdcount)
      m->handler.pfdcount++;

    if (thread)
      {
        pthread_mutex_lock (&thread->mtx);
        {
          thread->u.fd = fd;
          if (dir == THREAD_READ)
            m->read[thread->u.fd] = thread;
          else
            m->write[thread->u.fd] = thread;
        }
        pthread_mutex_unlock (&thread->mtx);

        if (t_ptr)
          {
            *t_ptr = thread;
            thread->ref = t_ptr;
          }
      }

    AWAKEN (m);
  }
  pthread_mutex_unlock (&m->mtx);

  return thread;
}

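/* Read/write threads are one-shot: after the callback runs it must re-add
 * itself to keep watching the fd (the scheduler NULLs out the caller's
 * reference pointer at dispatch). A sketch with hypothetical names:
 *
 *   static int accept_cb (struct thread *t)
 *   {
 *     struct listener *l = THREAD_ARG (t);
 *     ... handle accept () on THREAD_FD (t) ...
 *     thread_add_read (t->master, accept_cb, l, THREAD_FD (t), &l->t_read);
 *     return 0;
 *   }
 */
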
static struct thread *
funcname_thread_add_timer_timeval (struct thread_master *m,
                                   int (*func) (struct thread *), int type, void *arg,
                                   struct timeval *time_relative, struct thread **t_ptr, debugargdef)
{
  struct thread *thread;
  struct pqueue *queue;

  assert (m != NULL);

  assert (type == THREAD_TIMER);
  assert (time_relative);

  pthread_mutex_lock (&m->mtx);
  {
    if (t_ptr && *t_ptr) // thread is already scheduled; don't reschedule
      {
        pthread_mutex_unlock (&m->mtx);
        return NULL;
      }

    queue = m->timer;
    thread = thread_get (m, type, func, arg, debugargpass);

    pthread_mutex_lock (&thread->mtx);
    {
      monotime(&thread->u.sands);
      timeradd(&thread->u.sands, time_relative, &thread->u.sands);
      pqueue_enqueue(thread, queue);
      if (t_ptr)
        {
          *t_ptr = thread;
          thread->ref = t_ptr;
        }
    }
    pthread_mutex_unlock (&thread->mtx);

    AWAKEN (m);
  }
  pthread_mutex_unlock (&m->mtx);

  return thread;
}

/* Add timer event thread. */
struct thread *
funcname_thread_add_timer (struct thread_master *m,
                           int (*func) (struct thread *), void *arg, long timer,
                           struct thread **t_ptr, debugargdef)
{
  struct timeval trel;

  assert (m != NULL);

  trel.tv_sec = timer;
  trel.tv_usec = 0;

  return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg, &trel,
                                            t_ptr, debugargpass);
}

/* Add timer event thread with "millisecond" resolution */
struct thread *
funcname_thread_add_timer_msec (struct thread_master *m,
                                int (*func) (struct thread *), void *arg, long timer,
                                struct thread **t_ptr, debugargdef)
{
  struct timeval trel;

  assert (m != NULL);

  trel.tv_sec = timer / 1000;
  trel.tv_usec = 1000*(timer % 1000);

  return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg, &trel,
                                            t_ptr, debugargpass);
}

/* Add timer event thread with a struct timeval offset */
struct thread *
funcname_thread_add_timer_tv (struct thread_master *m,
                              int (*func) (struct thread *), void *arg, struct timeval *tv,
                              struct thread **t_ptr, debugargdef)
{
  return funcname_thread_add_timer_timeval (m, func, THREAD_TIMER, arg, tv,
                                            t_ptr, debugargpass);
}

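/* A sketch of the second- and millisecond-resolution forms (the names are
 * hypothetical); both arm a one-shot timer 10 seconds out, so periodic
 * work must re-add itself from the callback:
 *
 *   thread_add_timer (master, hello_send, oi, 10, &oi->t_hello);
 *   thread_add_timer_msec (master, hello_send, oi, 10 * 1000, &oi->t_hello);
 */
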
/* Add simple event thread. */
struct thread *
funcname_thread_add_event (struct thread_master *m,
                           int (*func) (struct thread *), void *arg, int val,
                           struct thread **t_ptr, debugargdef)
{
  struct thread *thread;

  assert (m != NULL);

  pthread_mutex_lock (&m->mtx);
  {
    if (t_ptr && *t_ptr) // thread is already scheduled; don't reschedule
      {
        pthread_mutex_unlock (&m->mtx);
        return NULL;
      }

    thread = thread_get (m, THREAD_EVENT, func, arg, debugargpass);
    pthread_mutex_lock (&thread->mtx);
    {
      thread->u.val = val;
      thread_list_add (&m->event, thread);
    }
    pthread_mutex_unlock (&thread->mtx);

    if (t_ptr)
      {
        *t_ptr = thread;
        thread->ref = t_ptr;
      }

    AWAKEN (m);
  }
  pthread_mutex_unlock (&m->mtx);

  return thread;
}

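/* A note on the t_ptr argument taken by all of the scheduling functions
 * above: when it is non-NULL, a non-NULL *t_ptr means the caller already
 * has this event outstanding and the add is refused, and on success the
 * location is remembered as thread->ref so that thread_cancel() and
 * thread_fetch() can NULL out the caller's pointer for it. */
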
static void
thread_cancel_read_or_write (struct thread *thread, short int state)
{
  for (nfds_t i = 0; i < thread->master->handler.pfdcount; ++i)
    if (thread->master->handler.pfds[i].fd == thread->u.fd)
      {
        thread->master->handler.pfds[i].events &= ~(state);

        /* remove thread fds from pfd list */
        if (thread->master->handler.pfds[i].events == 0)
          {
            memmove(thread->master->handler.pfds+i,
                    thread->master->handler.pfds+i+1,
                    (thread->master->handler.pfdsize-i-1) * sizeof(struct pollfd));
            thread->master->handler.pfdcount--;
            return;
          }
      }
}

/**
 * Cancel thread from scheduler.
 *
 * This function is *NOT* MT-safe. DO NOT call it from any other pthread except
 * the one which owns thread->master. You will crash.
 */
void
thread_cancel (struct thread *thread)
{
  struct thread_list *list = NULL;
  struct pqueue *queue = NULL;
  struct thread **thread_array = NULL;

  pthread_mutex_lock (&thread->mtx);
  pthread_mutex_lock (&thread->master->mtx);

  assert (pthread_self() == thread->master->owner);

  switch (thread->type)
    {
    case THREAD_READ:
      thread_cancel_read_or_write (thread, POLLIN | POLLHUP);
      thread_array = thread->master->read;
      break;
    case THREAD_WRITE:
      thread_cancel_read_or_write (thread, POLLOUT | POLLHUP);
      thread_array = thread->master->write;
      break;
    case THREAD_TIMER:
      queue = thread->master->timer;
      break;
    case THREAD_EVENT:
      list = &thread->master->event;
      break;
    case THREAD_READY:
      list = &thread->master->ready;
      break;
    default:
      goto done;
      break;
    }

  if (queue)
    {
      assert(thread->index >= 0);
      pqueue_remove (thread, queue);
    }
  else if (list)
    {
      thread_list_delete (list, thread);
    }
  else if (thread_array)
    {
      thread_array[thread->u.fd] = NULL;
    }
  else
    {
      assert(!"Thread should be either in queue or list or array!");
    }

  if (thread->ref)
    *thread->ref = NULL;

  thread_add_unuse (thread->master, thread);

done:
  pthread_mutex_unlock (&thread->master->mtx);
  pthread_mutex_unlock (&thread->mtx);
}

/* Delete all events which have argument value arg. */
unsigned int
thread_cancel_event (struct thread_master *m, void *arg)
{
  unsigned int ret = 0;
  struct thread *thread;
  struct thread *t;

  pthread_mutex_lock (&m->mtx);
  {
    thread = m->event.head;
    while (thread)
      {
        t = thread;
        pthread_mutex_lock (&t->mtx);
        {
          thread = t->next;

          if (t->arg == arg)
            {
              ret++;
              thread_list_delete (&m->event, t);
              if (t->ref)
                *t->ref = NULL;
              thread_add_unuse (m, t);
            }
        }
        pthread_mutex_unlock (&t->mtx);
      }

    /* thread can be on the ready list too */
    thread = m->ready.head;
    while (thread)
      {
        t = thread;
        pthread_mutex_lock (&t->mtx);
        {
          thread = t->next;

          if (t->arg == arg)
            {
              ret++;
              thread_list_delete (&m->ready, t);
              if (t->ref)
                *t->ref = NULL;
              thread_add_unuse (m, t);
            }
        }
        pthread_mutex_unlock (&t->mtx);
      }
  }
  pthread_mutex_unlock (&m->mtx);
  return ret;
}

static struct timeval *
thread_timer_wait (struct pqueue *queue, struct timeval *timer_val)
{
  if (queue->size)
    {
      struct thread *next_timer = queue->array[0];
      monotime_until(&next_timer->u.sands, timer_val);
      return timer_val;
    }
  return NULL;
}

static struct thread *
thread_run (struct thread_master *m, struct thread *thread,
            struct thread *fetch)
{
  *fetch = *thread;
  thread_add_unuse (m, thread);
  return fetch;
}

static int
thread_process_io_helper (struct thread_master *m, struct thread *thread,
                          short state, int pos)
{
  struct thread **thread_array;

  if (!thread)
    return 0;

  if (thread->type == THREAD_READ)
    thread_array = m->read;
  else
    thread_array = m->write;

  thread_array[thread->u.fd] = NULL;
  thread_list_add (&m->ready, thread);
  thread->type = THREAD_READY;
  /* if another pthread scheduled this file descriptor for the event we're
   * responding to, no problem; we're getting to it now */
  thread->master->handler.pfds[pos].events &= ~(state);
  return 1;
}

static void
thread_process_io (struct thread_master *m, struct pollfd *pfds,
                   unsigned int num, unsigned int count)
{
  unsigned int ready = 0;

  for (nfds_t i = 0; i < count && ready < num ; ++i)
    {
      /* no event for current fd? immediately continue */
      if (pfds[i].revents == 0)
        continue;

      ready++;

      /* Unless someone has called thread_cancel from another pthread, the only
       * thing that could have changed in m->handler.pfds while we were
       * asleep is the .events field in a given pollfd. Barring thread_cancel()
       * that value should be a superset of the values we have in our copy, so
       * there's no need to update it. Similarly, barring deletion, the fd
       * should still be a valid index into the master's pfds. */
      if (pfds[i].revents & (POLLIN | POLLHUP))
        thread_process_io_helper(m, m->read[pfds[i].fd], POLLIN, i);
      if (pfds[i].revents & POLLOUT)
        thread_process_io_helper(m, m->write[pfds[i].fd], POLLOUT, i);

      /* if one of our file descriptors is garbage, remove the same from
       * both pfds + update sizes and index */
      if (pfds[i].revents & POLLNVAL)
        {
          memmove (m->handler.pfds + i,
                   m->handler.pfds + i + 1,
                   (m->handler.pfdcount - i - 1) * sizeof(struct pollfd));
          m->handler.pfdcount--;

          memmove (pfds + i, pfds + i + 1,
                   (count - i - 1) * sizeof(struct pollfd));
          count--;
          i--;
        }
    }
}

/* Add all timers that have popped to the ready list. */
static unsigned int
thread_process_timers (struct pqueue *queue, struct timeval *timenow)
{
  struct thread *thread;
  unsigned int ready = 0;

  while (queue->size)
    {
      thread = queue->array[0];
      if (timercmp (timenow, &thread->u.sands, <))
        return ready;
      pqueue_dequeue(queue);
      thread->type = THREAD_READY;
      thread_list_add (&thread->master->ready, thread);
      ready++;
    }
  return ready;
}

/* process a list en masse, e.g. for event thread lists */
static unsigned int
thread_process (struct thread_list *list)
{
  struct thread *thread;
  struct thread *next;
  unsigned int ready = 0;

  for (thread = list->head; thread; thread = next)
    {
      next = thread->next;
      thread_list_delete (list, thread);
      thread->type = THREAD_READY;
      thread_list_add (&thread->master->ready, thread);
      ready++;
    }
  return ready;
}


/* Fetch next ready thread. */
struct thread *
thread_fetch (struct thread_master *m, struct thread *fetch)
{
  struct thread *thread;
  struct timeval now;
  struct timeval timer_val = { .tv_sec = 0, .tv_usec = 0 };
  struct timeval *timer_wait = &timer_val;

  do
    {
      int num = 0;

      /* Signals pre-empt everything */
      if (m->handle_signals)
        quagga_sigevent_process ();

      pthread_mutex_lock (&m->mtx);
      /* Drain the ready queue of already scheduled jobs, before scheduling
       * more.
       */
      if ((thread = thread_trim_head (&m->ready)) != NULL)
        {
          fetch = thread_run (m, thread, fetch);
          if (fetch->ref)
            *fetch->ref = NULL;
          pthread_mutex_unlock (&m->mtx);
          return fetch;
        }

      /* To be fair to all kinds of threads, and avoid starvation, we
       * need to be careful to consider all thread types for scheduling
       * in each quantum. I.e. we should not return early from here on.
       */

      /* Normal events are the next highest priority. */
      thread_process (&m->event);

      /* Calculate select wait timer if nothing else to do */
      if (m->ready.count == 0)
        {
          timer_wait = thread_timer_wait (m->timer, &timer_val);
        }

      if (timer_wait && timer_wait->tv_sec < 0)
        {
          timerclear(&timer_val);
          timer_wait = &timer_val;
        }

      unsigned int count = m->handler.pfdcount + m->handler.pfdcountsnmp;
      memcpy (m->handler.copy, m->handler.pfds, count * sizeof (struct pollfd));

      pthread_mutex_unlock (&m->mtx);
      {
        num = fd_poll (m, m->handler.copy, m->handler.pfdsize, count, timer_wait);
      }
      pthread_mutex_lock (&m->mtx);

      /* Signals should get quick treatment */
      if (num < 0)
        {
          if (errno == EINTR)
            {
              pthread_mutex_unlock (&m->mtx);
              continue; /* signal received - process it */
            }
          zlog_warn ("poll() error: %s", safe_strerror (errno));
          pthread_mutex_unlock (&m->mtx);
          return NULL;
        }

      /* Check foreground timers. Historically, they have had higher
       * priority than I/O threads, so let's push them onto the ready
       * list in front of the I/O threads. */
      monotime(&now);
      thread_process_timers (m->timer, &now);

      /* Got IO, process it */
      if (num > 0)
        thread_process_io (m, m->handler.copy, num, count);

      if ((thread = thread_trim_head (&m->ready)) != NULL)
        {
          fetch = thread_run (m, thread, fetch);
          if (fetch->ref)
            *fetch->ref = NULL;
          pthread_mutex_unlock (&m->mtx);
          return fetch;
        }

      pthread_mutex_unlock (&m->mtx);

    } while (m->spin);

  return NULL;
}

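/* Per iteration, the loop above dispatches in this order: pending signals;
 * anything already on the ready list; events; expired timers; ready file
 * descriptors. Events, timers and I/O are first batched onto the ready
 * list and then handed back one thread per call, which is what keeps one
 * type of work from starving the others. */
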
unsigned long
thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, unsigned long *cputime)
{
  /* This is 'user + sys' time. */
  *cputime = timeval_elapsed (now->cpu.ru_utime, start->cpu.ru_utime) +
             timeval_elapsed (now->cpu.ru_stime, start->cpu.ru_stime);
  return timeval_elapsed (now->real, start->real);
}

/* We should aim to yield after yield milliseconds, which defaults
   to THREAD_YIELD_TIME_SLOT.
   Note: we are using real (wall clock) time for this calculation.
   It could be argued that CPU time may make more sense in certain
   contexts. The things to consider are whether the thread may have
   blocked (in which case wall time increases, but CPU time does not),
   or whether the system is heavily loaded with other processes competing
   for CPU time. On balance, wall clock time seems to make sense.
   Plus it has the added benefit that gettimeofday should be faster
   than calling getrusage. */
int
thread_should_yield (struct thread *thread)
{
  int result;
  pthread_mutex_lock (&thread->mtx);
  {
    result = monotime_since(&thread->real, NULL) > (int64_t)thread->yield;
  }
  pthread_mutex_unlock (&thread->mtx);
  return result;
}

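/* Sketch of a long-running job yielding cooperatively (the 'worker' and
 * 'ctx' names are hypothetical):
 *
 *   static int worker (struct thread *t)
 *   {
 *     struct ctx *ctx = THREAD_ARG (t);
 *     while (more_work (ctx))
 *       {
 *         do_one_unit (ctx);
 *         if (thread_should_yield (t))
 *           {
 *             thread_add_event (t->master, worker, ctx, 0, NULL);
 *             return 0;
 *           }
 *       }
 *     return 0;
 *   }
 */
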
void
thread_set_yield_time (struct thread *thread, unsigned long yield_time)
{
  pthread_mutex_lock (&thread->mtx);
  {
    thread->yield = yield_time;
  }
  pthread_mutex_unlock (&thread->mtx);
}

void
thread_getrusage (RUSAGE_T *r)
{
  monotime(&r->real);
  getrusage(RUSAGE_SELF, &(r->cpu));
}

struct thread *thread_current = NULL;

/* We check thread consumed time. If the system has getrusage, we'll
   use that to get in-depth stats on the performance of the thread in
   addition to wall clock time stats from the monotonic clock. */
void
thread_call (struct thread *thread)
{
  unsigned long realtime, cputime;
  RUSAGE_T before, after;

  GETRUSAGE (&before);
  thread->real = before.real;

  thread_current = thread;
  (*thread->func) (thread);
  thread_current = NULL;

  GETRUSAGE (&after);

  realtime = thread_consumed_time (&after, &before, &cputime);
  thread->hist->real.total += realtime;
  if (thread->hist->real.max < realtime)
    thread->hist->real.max = realtime;
  thread->hist->cpu.total += cputime;
  if (thread->hist->cpu.max < cputime)
    thread->hist->cpu.max = cputime;

  ++(thread->hist->total_calls);
  thread->hist->types |= (1 << thread->add_type);

#ifdef CONSUMED_TIME_CHECK
  if (realtime > CONSUMED_TIME_CHECK)
    {
      /*
       * We have a CPU Hog on our hands.
       * Whinge about it now, so we're aware this is yet another task
       * to fix.
       */
      zlog_warn ("SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)",
                 thread->funcname,
                 (unsigned long) thread->func,
                 realtime/1000, cputime/1000);
    }
#endif /* CONSUMED_TIME_CHECK */
}

/* Execute thread */
void
funcname_thread_execute (struct thread_master *m,
                         int (*func)(struct thread *),
                         void *arg,
                         int val,
                         debugargdef)
{
  struct cpu_thread_history tmp;
  struct thread dummy;

  memset (&dummy, 0, sizeof (struct thread));

  pthread_mutex_init (&dummy.mtx, NULL);
  dummy.type = THREAD_EVENT;
  dummy.add_type = THREAD_EXECUTE;
  dummy.master = NULL;
  dummy.arg = arg;
  dummy.u.val = val;

  tmp.func = dummy.func = func;
  tmp.funcname = dummy.funcname = funcname;
  pthread_mutex_lock (&cpu_record_mtx);
  {
    dummy.hist = hash_get (cpu_record, &tmp,
                           (void * (*) (void *))cpu_record_hash_alloc);
  }
  pthread_mutex_unlock (&cpu_record_mtx);

  dummy.schedfrom = schedfrom;
  dummy.schedfrom_line = fromln;

  thread_call (&dummy);
}
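
/* Unlike the thread_add_* functions above, this runs the callback
 * synchronously on a stack-allocated dummy thread; nothing is queued on a
 * thread_master. Callers normally reach it through the thread_execute()
 * macro in thread.h, which supplies the debug arguments. */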