/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "timeval.h"
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#include "coverage.h"
#include "dummy.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "hash.h"
#include "hmap.h"
#include "ovs-rcu.h"
#include "ovs-thread.h"
#include "signals.h"
#include "seq.h"
#include "unixctl.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(timeval);

#ifdef _WIN32
typedef unsigned int clockid_t;

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC 1
#endif

#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 2
#endif

/* Number of 100-ns intervals from January 1, 1601 to January 1, 1970. */
static const unsigned long long unix_epoch = 116444736000000000;
#endif /* _WIN32 */

/* Structure set by unixctl time/warp command. */
struct large_warp {
    struct unixctl_conn *conn;   /* Connection waiting for warp response. */
    long long int total_warp;    /* Total offset to be added to monotonic time. */
    long long int warp;          /* 'total_warp' offset done in steps of 'warp'. */
    unsigned int main_thread_id; /* Identification for the main thread. */
};

struct clock {
    clockid_t id;               /* CLOCK_MONOTONIC or CLOCK_REALTIME. */

    /* Features for use by unit tests.  Protected by 'mutex'. */
    struct ovs_mutex mutex;
    atomic_bool slow_path;             /* True if warped or stopped. */
    struct timespec warp OVS_GUARDED;  /* Offset added for unit tests. */
    bool stopped OVS_GUARDED;          /* Disable real-time updates if true. */
    struct timespec cache OVS_GUARDED; /* Last time read from kernel. */
    struct large_warp large_warp OVS_GUARDED; /* Connection information waiting
                                                 for warp response. */
};

/* Our clocks. */
static struct clock monotonic_clock; /* CLOCK_MONOTONIC, if available. */
static struct clock wall_clock;      /* CLOCK_REALTIME. */

/* The monotonic time at which the time module was initialized. */
static long long int boot_time;

/* True only when timeval_dummy_register() is called. */
static bool timewarp_enabled;
/* Reference to the seq struct.  Threads other than the main thread can
 * wait on timewarp_seq and be woken up when time is warped. */
static struct seq *timewarp_seq;
/* Last value of 'timewarp_seq'. */
DEFINE_STATIC_PER_THREAD_DATA(uint64_t, last_seq, 0);

/* Monotonic time in milliseconds at which to die with SIGALRM (if not
 * LLONG_MAX). */
static long long int deadline = LLONG_MAX;

/* Monotonic time, in milliseconds, at which the last call to time_poll() woke
 * up. */
DEFINE_STATIC_PER_THREAD_DATA(long long int, last_wakeup, 0);

static void log_poll_interval(long long int last_wakeup);
static struct rusage *get_recent_rusage(void);
static int getrusage_thread(struct rusage *);
static void refresh_rusage(void);
static void timespec_add(struct timespec *sum,
                         const struct timespec *a, const struct timespec *b);

static void
init_clock(struct clock *c, clockid_t id)
{
    memset(c, 0, sizeof *c);
    c->id = id;
    ovs_mutex_init(&c->mutex);
    atomic_init(&c->slow_path, false);
    xclock_gettime(c->id, &c->cache);
}

static void
do_init_time(void)
{
    struct timespec ts;

    coverage_init();

    timewarp_seq = seq_create();
    init_clock(&monotonic_clock, (!clock_gettime(CLOCK_MONOTONIC, &ts)
                                  ? CLOCK_MONOTONIC
                                  : CLOCK_REALTIME));
    init_clock(&wall_clock, CLOCK_REALTIME);
    boot_time = timespec_to_msec(&monotonic_clock.cache);
}

/* Initializes the timetracking module, if not already initialized. */
static void
time_init(void)
{
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, do_init_time);
}

static void
time_timespec__(struct clock *c, struct timespec *ts)
{
    bool slow_path;

    time_init();

    atomic_read_relaxed(&c->slow_path, &slow_path);
    if (!slow_path) {
        xclock_gettime(c->id, ts);
    } else {
        struct timespec warp;
        struct timespec cache;
        bool stopped;

        ovs_mutex_lock(&c->mutex);
        stopped = c->stopped;
        warp = c->warp;
        cache = c->cache;
        ovs_mutex_unlock(&c->mutex);

        if (!stopped) {
            xclock_gettime(c->id, &cache);
        }
        timespec_add(ts, &cache, &warp);
    }
}

/* Stores a monotonic timer, accurate within TIME_UPDATE_INTERVAL ms, into
 * '*ts'. */
void
time_timespec(struct timespec *ts)
{
    time_timespec__(&monotonic_clock, ts);
}

/* Stores the current time, accurate within TIME_UPDATE_INTERVAL ms, into
 * '*ts'. */
void
time_wall_timespec(struct timespec *ts)
{
    time_timespec__(&wall_clock, ts);
}

static time_t
time_sec__(struct clock *c)
{
    struct timespec ts;

    time_timespec__(c, &ts);
    return ts.tv_sec;
}

/* Returns a monotonic timer, in seconds. */
time_t
time_now(void)
{
    return time_sec__(&monotonic_clock);
}

/* Returns the current time, in seconds. */
time_t
time_wall(void)
{
    return time_sec__(&wall_clock);
}

static long long int
time_msec__(struct clock *c)
{
    struct timespec ts;

    time_timespec__(c, &ts);
    return timespec_to_msec(&ts);
}

/* Returns a monotonic timer, in ms (within TIME_UPDATE_INTERVAL ms). */
long long int
time_msec(void)
{
    return time_msec__(&monotonic_clock);
}

/* Returns the current time, in ms (within TIME_UPDATE_INTERVAL ms). */
long long int
time_wall_msec(void)
{
    return time_msec__(&wall_clock);
}

/* Configures the program to die with SIGALRM 'secs' seconds from now, if
 * 'secs' is nonzero, or disables the feature if 'secs' is zero. */
void
time_alarm(unsigned int secs)
{
    long long int now;
    long long int msecs;

    assert_single_threaded();
    time_init();

    now = time_msec();
    msecs = secs * 1000LL;
    deadline = now < LLONG_MAX - msecs ? now + msecs : LLONG_MAX;
}
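
/* Usage sketch (illustrative, not taken from OVS callers): a single-threaded
 * daemon that wants to be killed if it ever stalls could request a watchdog
 * during startup with:
 *
 *     time_alarm(30);
 *
 * time_poll() below compares the resulting 'deadline' against time_msec() and
 * raises the fatal signal once it has passed. */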

/* Like poll(), except:
 *
 *      - The timeout is specified as an absolute time, as defined by
 *        time_msec(), instead of a duration.
 *
 *      - On error, returns a negative error code (instead of setting errno).
 *
 *      - If interrupted by a signal, retries automatically until the original
 *        timeout is reached.  (Because of this property, this function will
 *        never return -EINTR.)
 *
 * Stores the number of milliseconds elapsed during poll in '*elapsed'. */
int
time_poll(struct pollfd *pollfds, int n_pollfds, HANDLE *handles OVS_UNUSED,
          long long int timeout_when, int *elapsed)
{
    long long int *last_wakeup = last_wakeup_get();
    long long int start;
    bool quiescent;
    int retval = 0;

    time_init();
    coverage_clear();
    coverage_run();
    if (*last_wakeup && !thread_is_pmd()) {
        log_poll_interval(*last_wakeup);
    }
    start = time_msec();

    timeout_when = MIN(timeout_when, deadline);
    quiescent = ovsrcu_is_quiescent();

    for (;;) {
        long long int now = time_msec();
        int time_left;

        if (now >= timeout_when) {
            time_left = 0;
        } else if ((unsigned long long int) timeout_when - now > INT_MAX) {
            time_left = INT_MAX;
        } else {
            time_left = timeout_when - now;
        }

        if (!quiescent) {
            if (!time_left) {
                ovsrcu_quiesce();
            } else {
                ovsrcu_quiesce_start();
            }
        }

#ifndef _WIN32
        retval = poll(pollfds, n_pollfds, time_left);
        if (retval < 0) {
            retval = -errno;
        }
#else
        if (n_pollfds > MAXIMUM_WAIT_OBJECTS) {
            VLOG_ERR("Cannot handle more than maximum wait objects\n");
        } else if (n_pollfds != 0) {
            retval = WaitForMultipleObjects(n_pollfds, handles, FALSE,
                                            time_left);
        }
        if (retval < 0) {
            /* XXX This will be replaced by a Windows error to errno
             * conversion function. */
            retval = -WSAGetLastError();
            retval = -EINVAL;
        }
#endif

        if (!quiescent && time_left) {
            ovsrcu_quiesce_end();
        }

        if (deadline <= time_msec()) {
#ifndef _WIN32
            fatal_signal_handler(SIGALRM);
#else
            VLOG_ERR("wake up from WaitForMultipleObjects after deadline");
            fatal_signal_handler(SIGTERM);
#endif
            if (retval < 0) {
                retval = 0;
            }
            break;
        }

        if (retval != -EINTR) {
            break;
        }
    }
    *last_wakeup = time_msec();
    refresh_rusage();
    *elapsed = *last_wakeup - start;
    return retval;
}
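
/* Illustrative caller (hypothetical; 'sock_fd' is just a placeholder): waiting
 * up to 500 ms for a descriptor to become readable, using the absolute-time
 * convention described above:
 *
 *     struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *     int elapsed;
 *     int retval = time_poll(&pfd, 1, NULL, time_msec() + 500, &elapsed);
 *
 * A negative return value is a -errno value, and 'elapsed' reports how many
 * milliseconds the call actually blocked.  (OVS code normally reaches this
 * function through the poll-loop module rather than calling it directly.) */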

long long int
timespec_to_msec(const struct timespec *ts)
{
    return (long long int) ts->tv_sec * 1000 + ts->tv_nsec / (1000 * 1000);
}

long long int
timeval_to_msec(const struct timeval *tv)
{
    return (long long int) tv->tv_sec * 1000 + tv->tv_usec / 1000;
}
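
/* For example, a timespec of 2 s + 500000000 ns converts to 2500 ms, and a
 * timeval of 2 s + 500000 us likewise converts to 2500 ms; sub-millisecond
 * precision is truncated. */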

/* Returns the monotonic time at which the "time" module was initialized, in
 * milliseconds. */
long long int
time_boot_msec(void)
{
    time_init();
    return boot_time;
}

#ifdef _WIN32
static ULARGE_INTEGER
xgetfiletime(void)
{
    ULARGE_INTEGER current_time;
    FILETIME current_time_ft;

    /* Returns current time in UTC as a 64-bit value representing the number
     * of 100-nanosecond intervals since January 1, 1601. */
    GetSystemTimePreciseAsFileTime(&current_time_ft);
    current_time.LowPart = current_time_ft.dwLowDateTime;
    current_time.HighPart = current_time_ft.dwHighDateTime;

    return current_time;
}

static int
clock_gettime(clock_t id, struct timespec *ts)
{
    if (id == CLOCK_MONOTONIC) {
        static LARGE_INTEGER freq;
        LARGE_INTEGER count;
        long long int ns;

        if (!freq.QuadPart) {
            /* Number of counts per second. */
            QueryPerformanceFrequency(&freq);
        }
        /* Total number of counts from a starting point. */
        QueryPerformanceCounter(&count);

        /* Total nanoseconds from a starting point. */
        ns = (double) count.QuadPart / freq.QuadPart * 1000000000;

        ts->tv_sec = count.QuadPart / freq.QuadPart;
        ts->tv_nsec = ns % 1000000000;
    } else if (id == CLOCK_REALTIME) {
        ULARGE_INTEGER current_time = xgetfiletime();

        /* Time from Epoch to now. */
        ts->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
        ts->tv_nsec = ((current_time.QuadPart - unix_epoch) %
                       10000000) * 100;
    } else {
        return -1;
    }

    return 0;
}
#endif /* _WIN32 */
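
/* Worked example for the CLOCK_REALTIME branch above: a FILETIME reading of
 * unix_epoch + 15000000 (1.5 s worth of 100-ns ticks past the Unix epoch)
 * yields
 *
 *     ts->tv_sec  = 15000000 / 10000000         = 1
 *     ts->tv_nsec = (15000000 % 10000000) * 100 = 500000000
 *
 * that is, 1.5 s after 1970-01-01 00:00:00 UTC. */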

void
xgettimeofday(struct timeval *tv)
{
#ifndef _WIN32
    if (gettimeofday(tv, NULL) == -1) {
        VLOG_FATAL("gettimeofday failed (%s)", ovs_strerror(errno));
    }
#else
    ULARGE_INTEGER current_time = xgetfiletime();

    tv->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
    tv->tv_usec = ((current_time.QuadPart - unix_epoch) %
                   10000000) / 10;
#endif
}

void
xclock_gettime(clock_t id, struct timespec *ts)
{
    if (clock_gettime(id, ts) == -1) {
        /* It seems like a bad idea to try to use vlog here because it is
         * likely to try to check the current time. */
        ovs_abort(errno, "xclock_gettime() failed");
    }
}

static void
msec_to_timespec(long long int ms, struct timespec *ts)
{
    ts->tv_sec = ms / 1000;
    ts->tv_nsec = (ms % 1000) * 1000 * 1000;
}

static void
timewarp_work(void)
{
    struct clock *c = &monotonic_clock;
    struct timespec warp;

    ovs_mutex_lock(&c->mutex);
    if (!c->large_warp.conn) {
        ovs_mutex_unlock(&c->mutex);
        return;
    }

    if (c->large_warp.total_warp >= c->large_warp.warp) {
        msec_to_timespec(c->large_warp.warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
        c->large_warp.total_warp -= c->large_warp.warp;
    } else if (c->large_warp.total_warp) {
        msec_to_timespec(c->large_warp.total_warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
        c->large_warp.total_warp = 0;
    } else {
        /* c->large_warp.total_warp is 0. */
        msec_to_timespec(c->large_warp.warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
    }

    if (!c->large_warp.total_warp) {
        unixctl_command_reply(c->large_warp.conn, "warped");
        c->large_warp.conn = NULL;
    }

    ovs_mutex_unlock(&c->mutex);
    seq_change(timewarp_seq);

    /* Give threads (e.g. monitor) a chance to run. */
#ifndef _WIN32
    poll(NULL, 0, 10);
#else
    Sleep(10);
#endif
}

/* Perform work needed for "timewarp_seq"'s producer and consumers. */
void
timewarp_run(void)
{
    /* The function is a no-op unless timeval_dummy_register() is called. */
    if (timewarp_enabled) {
        unsigned int thread_id;
        ovs_mutex_lock(&monotonic_clock.mutex);
        thread_id = monotonic_clock.large_warp.main_thread_id;
        ovs_mutex_unlock(&monotonic_clock.mutex);

        if (thread_id != ovsthread_id_self()) {
            /* For threads other than the thread that changes the sequence,
             * wait on it. */
            uint64_t *last_seq = last_seq_get();

            *last_seq = seq_read(timewarp_seq);
            seq_wait(timewarp_seq, *last_seq);
        } else {
            /* Work on adding the remaining warps. */
            timewarp_work();
        }
    }
}

static long long int
timeval_diff_msec(const struct timeval *a, const struct timeval *b)
{
    return timeval_to_msec(a) - timeval_to_msec(b);
}

static void
timespec_add(struct timespec *sum,
             const struct timespec *a,
             const struct timespec *b)
{
    struct timespec tmp;

    tmp.tv_sec = a->tv_sec + b->tv_sec;
    tmp.tv_nsec = a->tv_nsec + b->tv_nsec;
    if (tmp.tv_nsec >= 1000 * 1000 * 1000) {
        tmp.tv_nsec -= 1000 * 1000 * 1000;
        tmp.tv_sec++;
    }

    *sum = tmp;
}
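
/* For instance, adding 1 s + 600000000 ns to 0 s + 700000000 ns overflows the
 * nanosecond field (1300000000 ns), so the carry above yields 2 s +
 * 300000000 ns, i.e. 2.3 s. */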

static bool
is_warped(const struct clock *c)
{
    bool warped;

    ovs_mutex_lock(&c->mutex);
    warped = monotonic_clock.warp.tv_sec || monotonic_clock.warp.tv_nsec;
    ovs_mutex_unlock(&c->mutex);

    return warped;
}

static void
log_poll_interval(long long int last_wakeup)
{
    long long int interval = time_msec() - last_wakeup;

    if (interval >= 1000 && !is_warped(&monotonic_clock)) {
        const struct rusage *last_rusage = get_recent_rusage();
        struct rusage rusage;

        if (!getrusage_thread(&rusage)) {
            VLOG_WARN("Unreasonably long %lldms poll interval"
                      " (%lldms user, %lldms system)",
                      interval,
                      timeval_diff_msec(&rusage.ru_utime,
                                        &last_rusage->ru_utime),
                      timeval_diff_msec(&rusage.ru_stime,
                                        &last_rusage->ru_stime));

            if (rusage.ru_minflt > last_rusage->ru_minflt
                || rusage.ru_majflt > last_rusage->ru_majflt) {
                VLOG_WARN("faults: %ld minor, %ld major",
                          rusage.ru_minflt - last_rusage->ru_minflt,
                          rusage.ru_majflt - last_rusage->ru_majflt);
            }
            if (rusage.ru_inblock > last_rusage->ru_inblock
                || rusage.ru_oublock > last_rusage->ru_oublock) {
                VLOG_WARN("disk: %ld reads, %ld writes",
                          rusage.ru_inblock - last_rusage->ru_inblock,
                          rusage.ru_oublock - last_rusage->ru_oublock);
            }
            if (rusage.ru_nvcsw > last_rusage->ru_nvcsw
                || rusage.ru_nivcsw > last_rusage->ru_nivcsw) {
                VLOG_WARN("context switches: %ld voluntary, %ld involuntary",
                          rusage.ru_nvcsw - last_rusage->ru_nvcsw,
                          rusage.ru_nivcsw - last_rusage->ru_nivcsw);
            }
        } else {
            VLOG_WARN("Unreasonably long %lldms poll interval", interval);
        }
        coverage_log();
    }
}
\f
/* CPU usage tracking. */

struct cpu_usage {
    long long int when;         /* Time that this sample was taken. */
    unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
};

struct cpu_tracker {
    struct cpu_usage older;
    struct cpu_usage newer;
    int cpu_usage;

    struct rusage recent_rusage;
};
DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);

static struct cpu_tracker *
get_cpu_tracker(void)
{
    struct cpu_tracker *t = cpu_tracker_var_get();
    if (!t) {
        t = xzalloc(sizeof *t);
        t->older.when = LLONG_MIN;
        t->newer.when = LLONG_MIN;
        cpu_tracker_var_set_unsafe(t);
    }
    return t;
}

static struct rusage *
get_recent_rusage(void)
{
    return &get_cpu_tracker()->recent_rusage;
}

static int
getrusage_thread(struct rusage *rusage OVS_UNUSED)
{
#ifdef RUSAGE_THREAD
    return getrusage(RUSAGE_THREAD, rusage);
#else
    errno = EINVAL;
    return -1;
#endif
}

static void
refresh_rusage(void)
{
    struct cpu_tracker *t = get_cpu_tracker();
    struct rusage *recent_rusage = &t->recent_rusage;

    if (!getrusage_thread(recent_rusage)) {
        long long int now = time_msec();
        if (now >= t->newer.when + 3 * 1000) {
            t->older = t->newer;
            t->newer.when = now;
            t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
                            timeval_to_msec(&recent_rusage->ru_stime));

            if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
                unsigned int dividend = t->newer.cpu - t->older.cpu;
                unsigned int divisor = (t->newer.when - t->older.when) / 100;
                t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
            } else {
                t->cpu_usage = -1;
            }
        }
    }
}

/* Returns an estimate of this process's CPU usage, as a percentage, over the
 * past few seconds of wall-clock time.  Returns -1 if no estimate is available
 * (which will happen if the process has not been running long enough to have
 * an estimate, and can happen for other reasons as well). */
int
get_cpu_usage(void)
{
    return get_cpu_tracker()->cpu_usage;
}
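
/* Illustrative use (hypothetical caller): a monitoring loop might warn when
 * this thread is close to saturating a core:
 *
 *     int usage = get_cpu_usage();
 *     if (usage > 90) {
 *         VLOG_WARN("high CPU usage: %d%%", usage);
 *     }
 */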
\f
/* Unixctl interface. */

/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
 * advancing, except due to later calls to "time/warp". */
static void
timeval_stop_cb(struct unixctl_conn *conn,
                int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
                void *aux OVS_UNUSED)
{
    ovs_mutex_lock(&monotonic_clock.mutex);
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
    monotonic_clock.stopped = true;
    xclock_gettime(monotonic_clock.id, &monotonic_clock.cache);
    ovs_mutex_unlock(&monotonic_clock.mutex);

    unixctl_command_reply(conn, NULL);
}
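
/* For example, in a test run where timeval_dummy_register() has been called,
 * time can be frozen from the shell with:
 *
 *     ovs-appctl time/stop
 *
 * after which time_msec() and friends stand still except for explicit
 * "time/warp" advances. */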

/* "time/warp MSECS" advances the current monotonic time by the specified
 * number of milliseconds.  Unless "time/stop" has also been executed, the
 * monotonic clock continues to tick forward at the normal rate afterward.
 *
 * "time/warp LARGE_MSECS MSECS" is a variation of the above command.  It
 * advances the current monotonic time by LARGE_MSECS.  This is done MSECS
 * at a time in each run of the main thread.  This gives other threads
 * time to run after the clock has been advanced by MSECS.
 *
 * Does not affect wall clock readings. */
static void
timeval_warp_cb(struct unixctl_conn *conn,
                int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED)
{
    long long int total_warp = argc > 2 ? atoll(argv[1]) : 0;
    long long int msecs = argc > 2 ? atoll(argv[2]) : atoll(argv[1]);
    if (msecs <= 0 || total_warp < 0) {
        unixctl_command_reply_error(conn, "invalid MSECS");
        return;
    }

    ovs_mutex_lock(&monotonic_clock.mutex);
    if (monotonic_clock.large_warp.conn) {
        ovs_mutex_unlock(&monotonic_clock.mutex);
        unixctl_command_reply_error(conn, "A previous warp in progress");
        return;
    }
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
    monotonic_clock.large_warp.conn = conn;
    monotonic_clock.large_warp.total_warp = total_warp;
    monotonic_clock.large_warp.warp = msecs;
    monotonic_clock.large_warp.main_thread_id = ovsthread_id_self();
    ovs_mutex_unlock(&monotonic_clock.mutex);

    timewarp_work();
}
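
/* For example, the two forms described above can be driven from the shell
 * (again only when timeval_dummy_register() is in effect):
 *
 *     ovs-appctl time/warp 1000
 *     ovs-appctl time/warp 10000 500
 *
 * The first advances monotonic time by one second at once; the second advances
 * it by ten seconds in 500 ms steps, giving other threads a chance to run
 * between steps. */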

void
timeval_dummy_register(void)
{
    timewarp_enabled = true;
    unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL);
    unixctl_command_register("time/warp", "[large_msecs] msecs", 1, 2,
                             timeval_warp_cb, NULL);
}



/* strftime() with an extension for high-resolution timestamps.  Any '#'s in
 * 'format' will be replaced by subseconds, e.g. use "%S.###" to obtain results
 * like "01.123". */
size_t
strftime_msec(char *s, size_t max, const char *format,
              const struct tm_msec *tm)
{
    size_t n;

    /* Visual Studio 2013's behavior is to crash when 0 is passed as second
     * argument to strftime. */
    n = max ? strftime(s, max, format, &tm->tm) : 0;
    if (n) {
        char decimals[4];
        char *p;

        sprintf(decimals, "%03d", tm->msec);
        for (p = strchr(s, '#'); p; p = strchr(p, '#')) {
            char *d = decimals;
            while (*p == '#') {
                *p++ = *d ? *d++ : '0';
            }
        }
    }

    return n;
}
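
/* Example (a typical pattern, not taken verbatim from OVS code): formatting
 * the current wall-clock time with millisecond resolution:
 *
 *     struct tm_msec tm;
 *     char s[64];
 *
 *     localtime_msec(time_wall_msec(), &tm);
 *     strftime_msec(s, sizeof s, "%Y-%m-%d %H:%M:%S.###", &tm);
 *
 * might produce a string such as "2015-06-30 23:59:59.123". */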

struct tm_msec *
localtime_msec(long long int now, struct tm_msec *result)
{
    time_t now_sec = now / 1000;
    localtime_r(&now_sec, &result->tm);
    result->msec = now % 1000;
    return result;
}

struct tm_msec *
gmtime_msec(long long int now, struct tm_msec *result)
{
    time_t now_sec = now / 1000;
    gmtime_r(&now_sec, &result->tm);
    result->msec = now % 1000;
    return result;
}