/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "timeval.h"
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>
#include "coverage.h"
#include "dummy.h"
#include "openvswitch/dynamic-string.h"
#include "fatal-signal.h"
#include "hash.h"
#include "openvswitch/hmap.h"
#include "ovs-rcu.h"
#include "ovs-thread.h"
#include "signals.h"
#include "seq.h"
#include "unixctl.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(timeval);

#if defined(_WIN32) || defined(__MACH__)
typedef unsigned int clockid_t;
static int clock_gettime(clock_t id, struct timespec *ts);

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC 1
#endif

#ifndef CLOCK_REALTIME
#define CLOCK_REALTIME 2
#endif
#endif /* defined(_WIN32) || defined(__MACH__) */

#ifdef _WIN32
/* Number of 100 ns intervals from January 1, 1601 till January 1, 1970. */
const static unsigned long long unix_epoch = 116444736000000000;
#endif /* _WIN32 */

/* Structure set by unixctl time/warp command. */
struct large_warp {
    struct unixctl_conn *conn;   /* Connection waiting for warp response. */
    long long int total_warp;    /* Total offset to be added to monotonic time. */
    long long int warp;          /* 'total_warp' offset done in steps of 'warp'. */
    unsigned int main_thread_id; /* Identification for the main thread. */
};

struct clock {
    clockid_t id;               /* CLOCK_MONOTONIC or CLOCK_REALTIME. */

    /* Features for use by unit tests.  Protected by 'mutex'. */
    struct ovs_mutex mutex;
    atomic_bool slow_path;             /* True if warped or stopped. */
    struct timespec warp OVS_GUARDED;  /* Offset added for unit tests. */
    bool stopped OVS_GUARDED;          /* Disable real-time updates if true. */
    struct timespec cache OVS_GUARDED; /* Last time read from kernel. */
    struct large_warp large_warp OVS_GUARDED; /* Connection information waiting
                                                 for warp response. */
};

/* Our clocks. */
static struct clock monotonic_clock; /* CLOCK_MONOTONIC, if available. */
static struct clock wall_clock;      /* CLOCK_REALTIME. */

/* The monotonic time at which the time module was initialized. */
static long long int boot_time;

/* True only when timeval_dummy_register() is called. */
static bool timewarp_enabled;
/* Reference to the seq struct.  Threads other than the main thread can
 * wait on timewarp_seq and be woken up when time is warped. */
static struct seq *timewarp_seq;
/* Last value of 'timewarp_seq'. */
DEFINE_STATIC_PER_THREAD_DATA(uint64_t, last_seq, 0);

/* Monotonic time in milliseconds at which to die with SIGALRM (if not
 * LLONG_MAX). */
static long long int deadline = LLONG_MAX;

/* Monotonic time, in milliseconds, at which the last call to time_poll() woke
 * up. */
DEFINE_STATIC_PER_THREAD_DATA(long long int, last_wakeup, 0);

static void log_poll_interval(long long int last_wakeup);
static struct rusage *get_recent_rusage(void);
static int getrusage_thread(struct rusage *);
static void refresh_rusage(void);
static void timespec_add(struct timespec *sum,
                         const struct timespec *a, const struct timespec *b);

static void
init_clock(struct clock *c, clockid_t id)
{
    memset(c, 0, sizeof *c);
    c->id = id;
    ovs_mutex_init(&c->mutex);
    atomic_init(&c->slow_path, false);
    xclock_gettime(c->id, &c->cache);
}

static void
do_init_time(void)
{
    struct timespec ts;

    coverage_init();

    timewarp_seq = seq_create();
    init_clock(&monotonic_clock, (!clock_gettime(CLOCK_MONOTONIC, &ts)
                                  ? CLOCK_MONOTONIC
                                  : CLOCK_REALTIME));
    init_clock(&wall_clock, CLOCK_REALTIME);
    boot_time = timespec_to_msec(&monotonic_clock.cache);
}

/* Initializes the timetracking module, if not already initialized. */
static void
time_init(void)
{
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, do_init_time);
}

static void
time_timespec__(struct clock *c, struct timespec *ts)
{
    bool slow_path;

    time_init();

    atomic_read_relaxed(&c->slow_path, &slow_path);
    if (!slow_path) {
        xclock_gettime(c->id, ts);
    } else {
        struct timespec warp;
        struct timespec cache;
        bool stopped;

        ovs_mutex_lock(&c->mutex);
        stopped = c->stopped;
        warp = c->warp;
        cache = c->cache;
        ovs_mutex_unlock(&c->mutex);

        if (!stopped) {
            xclock_gettime(c->id, &cache);
        }
        timespec_add(ts, &cache, &warp);
    }
}
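
/* For illustration: on the common fast path the clock value comes straight
 * from xclock_gettime() with no locking.  Only when a test has warped or
 * stopped the clock does the slow path combine the cached kernel reading with
 * the accumulated warp offset, roughly:
 *
 *     result = (stopped ? cache : xclock_gettime(id)) + warp
 *
 * so a 2 s warp on top of a cached reading of {100, 0} yields {102, 0}. */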

/* Stores a monotonic timer, accurate within TIME_UPDATE_INTERVAL ms, into
 * '*ts'. */
void
time_timespec(struct timespec *ts)
{
    time_timespec__(&monotonic_clock, ts);
}

/* Stores the current time, accurate within TIME_UPDATE_INTERVAL ms, into
 * '*ts'. */
void
time_wall_timespec(struct timespec *ts)
{
    time_timespec__(&wall_clock, ts);
}

static time_t
time_sec__(struct clock *c)
{
    struct timespec ts;

    time_timespec__(c, &ts);
    return ts.tv_sec;
}

/* Returns a monotonic timer, in seconds. */
time_t
time_now(void)
{
    return time_sec__(&monotonic_clock);
}

/* Returns the current time, in seconds. */
time_t
time_wall(void)
{
    return time_sec__(&wall_clock);
}

static long long int
time_msec__(struct clock *c)
{
    struct timespec ts;

    time_timespec__(c, &ts);
    return timespec_to_msec(&ts);
}

/* Returns a monotonic timer, in ms (within TIME_UPDATE_INTERVAL ms). */
long long int
time_msec(void)
{
    return time_msec__(&monotonic_clock);
}

/* Returns the current time, in ms (within TIME_UPDATE_INTERVAL ms). */
long long int
time_wall_msec(void)
{
    return time_msec__(&wall_clock);
}
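
/* A typical usage sketch (hypothetical caller, not from this file): use
 * time_msec() for durations and deadlines, since it never jumps backward, and
 * time_wall_msec() only for timestamps shown to users:
 *
 *     long long int deadline_ms = time_msec() + 500;   // 500 ms from now
 *     long long int stamp_ms = time_wall_msec();       // wall-clock epoch ms
 */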

/* Configures the program to die with SIGALRM 'secs' seconds from now, if
 * 'secs' is nonzero, or disables the feature if 'secs' is zero. */
void
time_alarm(unsigned int secs)
{
    long long int now;
    long long int msecs;

    assert_single_threaded();
    time_init();

    now = time_msec();
    msecs = secs * 1000LL;
    deadline = now < LLONG_MAX - msecs ? now + msecs : LLONG_MAX;
}
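
/* For example, time_alarm(30) moves 'deadline' to roughly time_msec() + 30000;
 * time_poll() checks 'deadline' on every wakeup and runs the fatal-signal
 * handler once the monotonic clock passes it, so a stuck main loop cannot
 * outlive the alarm by more than one poll interval. */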

/* Like poll(), except:
 *
 *      - The timeout is specified as an absolute time, as defined by
 *        time_msec(), instead of a duration.
 *
 *      - On error, returns a negative error code (instead of setting errno).
 *
 *      - If interrupted by a signal, retries automatically until the original
 *        timeout is reached.  (Because of this property, this function will
 *        never return -EINTR.)
 *
 * Stores the number of milliseconds elapsed during poll in '*elapsed'. */
int
time_poll(struct pollfd *pollfds, int n_pollfds, HANDLE *handles OVS_UNUSED,
          long long int timeout_when, int *elapsed)
{
    long long int *last_wakeup = last_wakeup_get();
    long long int start;
    bool quiescent;
    int retval = 0;

    time_init();
    coverage_clear();
    coverage_run();
    if (*last_wakeup && !thread_is_pmd()) {
        log_poll_interval(*last_wakeup);
    }
    start = time_msec();

    timeout_when = MIN(timeout_when, deadline);
    quiescent = ovsrcu_is_quiescent();

    for (;;) {
        long long int now = time_msec();
        int time_left;

        if (now >= timeout_when) {
            time_left = 0;
        } else if ((unsigned long long int) timeout_when - now > INT_MAX) {
            time_left = INT_MAX;
        } else {
            time_left = timeout_when - now;
        }

        if (!quiescent) {
            if (!time_left) {
                ovsrcu_quiesce();
            } else {
                ovsrcu_quiesce_start();
            }
        }

#ifndef _WIN32
        retval = poll(pollfds, n_pollfds, time_left);
        if (retval < 0) {
            retval = -errno;
        }
#else
        if (n_pollfds > MAXIMUM_WAIT_OBJECTS) {
            VLOG_ERR("Cannot handle more than maximum wait objects\n");
        } else if (n_pollfds != 0) {
            retval = WaitForMultipleObjects(n_pollfds, handles, FALSE,
                                            time_left);
        }
        if (retval < 0) {
            /* XXX This will be replaced by a Windows error to errno
             * conversion function. */
            retval = -WSAGetLastError();
            retval = -EINVAL;
        }
#endif

        if (!quiescent && time_left) {
            ovsrcu_quiesce_end();
        }

        if (deadline <= time_msec()) {
#ifndef _WIN32
            fatal_signal_handler(SIGALRM);
#else
            VLOG_ERR("wake up from WaitForMultipleObjects after deadline");
            fatal_signal_handler(SIGTERM);
#endif
            if (retval < 0) {
                retval = 0;
            }
            break;
        }

        if (retval != -EINTR) {
            break;
        }
    }
    *last_wakeup = time_msec();
    refresh_rusage();
    *elapsed = *last_wakeup - start;
    return retval;
}
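
/* Callers normally reach time_poll() through the poll-loop module rather than
 * directly.  A minimal direct-use sketch (hypothetical caller, 'sock_fd' is
 * assumed) would look like:
 *
 *     struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *     int elapsed;
 *     int n = time_poll(&pfd, 1, NULL, time_msec() + 1000, &elapsed);
 *     // n > 0: 'pfd.revents' is set; n == 0: the 1-second timeout expired;
 *     // n < 0: a negative errno value (never -EINTR).
 */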

long long int
timespec_to_msec(const struct timespec *ts)
{
    return (long long int) ts->tv_sec * 1000 + ts->tv_nsec / (1000 * 1000);
}

long long int
timeval_to_msec(const struct timeval *tv)
{
    return (long long int) tv->tv_sec * 1000 + tv->tv_usec / 1000;
}
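
/* For example, a timespec of {tv_sec = 1, tv_nsec = 500000000} converts to
 * 1500 ms, and a timeval of {tv_sec = 2, tv_usec = 250000} converts to
 * 2250 ms; sub-millisecond remainders are truncated, not rounded. */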

/* Returns the monotonic time at which the "time" module was initialized, in
 * milliseconds. */
long long int
time_boot_msec(void)
{
    time_init();
    return boot_time;
}

#ifdef _WIN32
static ULARGE_INTEGER
xgetfiletime(void)
{
    ULARGE_INTEGER current_time;
    FILETIME current_time_ft;

    /* Returns current time in UTC as a 64-bit value representing the number
     * of 100-nanosecond intervals since January 1, 1601. */
    GetSystemTimePreciseAsFileTime(&current_time_ft);
    current_time.LowPart = current_time_ft.dwLowDateTime;
    current_time.HighPart = current_time_ft.dwHighDateTime;

    return current_time;
}

static int
clock_gettime(clock_t id, struct timespec *ts)
{
    if (id == CLOCK_MONOTONIC) {
        static LARGE_INTEGER freq;
        LARGE_INTEGER count;
        long long int ns;

        if (!freq.QuadPart) {
            /* Number of counts per second. */
            QueryPerformanceFrequency(&freq);
        }
        /* Total number of counts from a starting point. */
        QueryPerformanceCounter(&count);

        /* Total nanoseconds from a starting point. */
        ns = (double) count.QuadPart / freq.QuadPart * 1000000000;

        ts->tv_sec = count.QuadPart / freq.QuadPart;
        ts->tv_nsec = ns % 1000000000;
    } else if (id == CLOCK_REALTIME) {
        ULARGE_INTEGER current_time = xgetfiletime();

        /* Time from Epoch to now. */
        ts->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
        ts->tv_nsec = ((current_time.QuadPart - unix_epoch) %
                       10000000) * 100;
    } else {
        return -1;
    }

    return 0;
}
#endif /* _WIN32 */

#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
static int
clock_gettime(clock_t id, struct timespec *ts)
{
    mach_timespec_t mts;
    clock_serv_t clk;
    clock_id_t cid;

    if (id == CLOCK_MONOTONIC) {
        cid = SYSTEM_CLOCK;
    } else if (id == CLOCK_REALTIME) {
        cid = CALENDAR_CLOCK;
    } else {
        return -1;
    }

    host_get_clock_service(mach_host_self(), cid, &clk);
    clock_get_time(clk, &mts);
    mach_port_deallocate(mach_task_self(), clk);
    ts->tv_sec = mts.tv_sec;
    ts->tv_nsec = mts.tv_nsec;

    return 0;
}
#endif

void
xgettimeofday(struct timeval *tv)
{
#ifndef _WIN32
    if (gettimeofday(tv, NULL) == -1) {
        VLOG_FATAL("gettimeofday failed (%s)", ovs_strerror(errno));
    }
#else
    ULARGE_INTEGER current_time = xgetfiletime();

    tv->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
    tv->tv_usec = ((current_time.QuadPart - unix_epoch) %
                   10000000) / 10;
#endif
}

void
xclock_gettime(clock_t id, struct timespec *ts)
{
    if (clock_gettime(id, ts) == -1) {
        /* It seems like a bad idea to try to use vlog here because it is
         * likely to try to check the current time. */
        ovs_abort(errno, "xclock_gettime() failed");
    }
}

static void
msec_to_timespec(long long int ms, struct timespec *ts)
{
    ts->tv_sec = ms / 1000;
    ts->tv_nsec = (ms % 1000) * 1000 * 1000;
}
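
/* For example, msec_to_timespec(1234, &ts) yields {tv_sec = 1,
 * tv_nsec = 234000000}; it is the inverse of timespec_to_msec() above. */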

static void
timewarp_work(void)
{
    struct clock *c = &monotonic_clock;
    struct timespec warp;

    ovs_mutex_lock(&c->mutex);
    if (!c->large_warp.conn) {
        ovs_mutex_unlock(&c->mutex);
        return;
    }

    if (c->large_warp.total_warp >= c->large_warp.warp) {
        msec_to_timespec(c->large_warp.warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
        c->large_warp.total_warp -= c->large_warp.warp;
    } else if (c->large_warp.total_warp) {
        msec_to_timespec(c->large_warp.total_warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
        c->large_warp.total_warp = 0;
    } else {
        /* c->large_warp.total_warp is 0. */
        msec_to_timespec(c->large_warp.warp, &warp);
        timespec_add(&c->warp, &c->warp, &warp);
    }

    if (!c->large_warp.total_warp) {
        unixctl_command_reply(c->large_warp.conn, "warped");
        c->large_warp.conn = NULL;
    }

    ovs_mutex_unlock(&c->mutex);
    seq_change(timewarp_seq);

    /* Give other threads (e.g. the monitor thread) a chance to run. */
#ifndef _WIN32
    poll(NULL, 0, 10);
#else
    Sleep(10);
#endif
}

/* Perform work needed for "timewarp_seq"'s producer and consumers. */
void
timewarp_run(void)
{
    /* The function is a no-op unless timeval_dummy_register() is called. */
    if (timewarp_enabled) {
        unsigned int thread_id;
        ovs_mutex_lock(&monotonic_clock.mutex);
        thread_id = monotonic_clock.large_warp.main_thread_id;
        ovs_mutex_unlock(&monotonic_clock.mutex);

        if (thread_id != ovsthread_id_self()) {
            /* For threads other than the thread that changes the sequence,
             * wait on it. */
            uint64_t *last_seq = last_seq_get();

            *last_seq = seq_read(timewarp_seq);
            seq_wait(timewarp_seq, *last_seq);
        } else {
            /* Work on adding the remaining warps. */
            timewarp_work();
        }
    }
}

static long long int
timeval_diff_msec(const struct timeval *a, const struct timeval *b)
{
    return timeval_to_msec(a) - timeval_to_msec(b);
}

static void
timespec_add(struct timespec *sum,
             const struct timespec *a,
             const struct timespec *b)
{
    struct timespec tmp;

    tmp.tv_sec = a->tv_sec + b->tv_sec;
    tmp.tv_nsec = a->tv_nsec + b->tv_nsec;
    if (tmp.tv_nsec >= 1000 * 1000 * 1000) {
        tmp.tv_nsec -= 1000 * 1000 * 1000;
        tmp.tv_sec++;
    }

    *sum = tmp;
}
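
/* For example, adding {1, 800000000} and {0, 300000000} overflows the
 * nanosecond field, so the carry moves into seconds: the result is
 * {2, 100000000}.  A single carry is enough because each input's tv_nsec is
 * already below one second. */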

static bool
is_warped(const struct clock *c)
{
    bool warped;

    ovs_mutex_lock(&c->mutex);
    warped = monotonic_clock.warp.tv_sec || monotonic_clock.warp.tv_nsec;
    ovs_mutex_unlock(&c->mutex);

    return warped;
}

static void
log_poll_interval(long long int last_wakeup)
{
    long long int interval = time_msec() - last_wakeup;

    if (interval >= 1000 && !is_warped(&monotonic_clock)) {
        const struct rusage *last_rusage = get_recent_rusage();
        struct rusage rusage;

        if (!getrusage_thread(&rusage)) {
            VLOG_WARN("Unreasonably long %lldms poll interval"
                      " (%lldms user, %lldms system)",
                      interval,
                      timeval_diff_msec(&rusage.ru_utime,
                                        &last_rusage->ru_utime),
                      timeval_diff_msec(&rusage.ru_stime,
                                        &last_rusage->ru_stime));

            if (rusage.ru_minflt > last_rusage->ru_minflt
                || rusage.ru_majflt > last_rusage->ru_majflt) {
                VLOG_WARN("faults: %ld minor, %ld major",
                          rusage.ru_minflt - last_rusage->ru_minflt,
                          rusage.ru_majflt - last_rusage->ru_majflt);
            }
            if (rusage.ru_inblock > last_rusage->ru_inblock
                || rusage.ru_oublock > last_rusage->ru_oublock) {
                VLOG_WARN("disk: %ld reads, %ld writes",
                          rusage.ru_inblock - last_rusage->ru_inblock,
                          rusage.ru_oublock - last_rusage->ru_oublock);
            }
            if (rusage.ru_nvcsw > last_rusage->ru_nvcsw
                || rusage.ru_nivcsw > last_rusage->ru_nivcsw) {
                VLOG_WARN("context switches: %ld voluntary, %ld involuntary",
                          rusage.ru_nvcsw - last_rusage->ru_nvcsw,
                          rusage.ru_nivcsw - last_rusage->ru_nivcsw);
            }
        } else {
            VLOG_WARN("Unreasonably long %lldms poll interval", interval);
        }
        coverage_log();
    }
}
\f
/* CPU usage tracking. */

struct cpu_usage {
    long long int when;         /* Time that this sample was taken. */
    unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
};

struct cpu_tracker {
    struct cpu_usage older;
    struct cpu_usage newer;
    int cpu_usage;

    struct rusage recent_rusage;
};
DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);

static struct cpu_tracker *
get_cpu_tracker(void)
{
    struct cpu_tracker *t = cpu_tracker_var_get();
    if (!t) {
        t = xzalloc(sizeof *t);
        t->older.when = LLONG_MIN;
        t->newer.when = LLONG_MIN;
        cpu_tracker_var_set_unsafe(t);
    }
    return t;
}

static struct rusage *
get_recent_rusage(void)
{
    return &get_cpu_tracker()->recent_rusage;
}

static int
getrusage_thread(struct rusage *rusage OVS_UNUSED)
{
#ifdef RUSAGE_THREAD
    return getrusage(RUSAGE_THREAD, rusage);
#else
    errno = EINVAL;
    return -1;
#endif
}

static void
refresh_rusage(void)
{
    struct cpu_tracker *t = get_cpu_tracker();
    struct rusage *recent_rusage = &t->recent_rusage;

    if (!getrusage_thread(recent_rusage)) {
        long long int now = time_msec();
        if (now >= t->newer.when + 3 * 1000) {
            t->older = t->newer;
            t->newer.when = now;
            t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
                            timeval_to_msec(&recent_rusage->ru_stime));

            if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
                unsigned int dividend = t->newer.cpu - t->older.cpu;
                unsigned int divisor = (t->newer.when - t->older.when) / 100;
                t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
            } else {
                t->cpu_usage = -1;
            }
        }
    }
}

/* Returns an estimate of this process's CPU usage, as a percentage, over the
 * past few seconds of wall-clock time.  Returns -1 if no estimate is available
 * (which will happen if the process has not been running long enough to have
 * an estimate, and can happen for other reasons as well). */
int
get_cpu_usage(void)
{
    return get_cpu_tracker()->cpu_usage;
}
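
/* To illustrate the calculation in refresh_rusage(): if a thread consumed
 * 1500 ms of user+system CPU time over a 3000 ms wall-clock window, then
 * dividend = 1500, divisor = 3000 / 100 = 30, and the stored cpu_usage is
 * 1500 / 30 = 50, i.e. 50%. */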
\f
/* Unixctl interface. */

/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
 * advancing, except due to later calls to "time/warp". */
static void
timeval_stop_cb(struct unixctl_conn *conn,
                int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
                void *aux OVS_UNUSED)
{
    ovs_mutex_lock(&monotonic_clock.mutex);
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
    monotonic_clock.stopped = true;
    xclock_gettime(monotonic_clock.id, &monotonic_clock.cache);
    ovs_mutex_unlock(&monotonic_clock.mutex);

    unixctl_command_reply(conn, NULL);
}

/* "time/warp MSECS" advances the current monotonic time by the specified
 * number of milliseconds.  Unless "time/stop" has also been executed, the
 * monotonic clock continues to tick forward at the normal rate afterward.
 *
 * "time/warp LARGE_MSECS MSECS" is a variation of the above command.  It
 * advances the current monotonic time by LARGE_MSECS.  This is done MSECS
 * at a time in each run of the main thread.  This gives other threads
 * time to run after the clock has been advanced by MSECS.
 *
 * Does not affect wall clock readings. */
static void
timeval_warp_cb(struct unixctl_conn *conn,
                int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED)
{
    long long int total_warp = argc > 2 ? atoll(argv[1]) : 0;
    long long int msecs = argc > 2 ? atoll(argv[2]) : atoll(argv[1]);
    if (msecs <= 0 || total_warp < 0) {
        unixctl_command_reply_error(conn, "invalid MSECS");
        return;
    }

    ovs_mutex_lock(&monotonic_clock.mutex);
    if (monotonic_clock.large_warp.conn) {
        ovs_mutex_unlock(&monotonic_clock.mutex);
        unixctl_command_reply_error(conn, "A previous warp in progress");
        return;
    }
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
    monotonic_clock.large_warp.conn = conn;
    monotonic_clock.large_warp.total_warp = total_warp;
    monotonic_clock.large_warp.warp = msecs;
    monotonic_clock.large_warp.main_thread_id = ovsthread_id_self();
    ovs_mutex_unlock(&monotonic_clock.mutex);

    timewarp_work();
}

void
timeval_dummy_register(void)
{
    timewarp_enabled = true;
    unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL);
    unixctl_command_register("time/warp", "[large_msecs] msecs", 1, 2,
                             timeval_warp_cb, NULL);
}
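
/* Once these dummy commands are registered (typically when time faking is
 * enabled for the test suite), the clock can be driven from the shell, for
 * example:
 *
 *     ovs-appctl time/stop
 *     ovs-appctl time/warp 10000 100
 *
 * which freezes the monotonic clock and then advances it by 10 seconds in
 * 100 ms steps, replying "warped" once the full offset has been applied. */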



/* strftime() with an extension for high-resolution timestamps.  Any '#'s in
 * 'format' will be replaced by subseconds, e.g. use "%S.###" to obtain results
 * like "01.123". */
size_t
strftime_msec(char *s, size_t max, const char *format,
              const struct tm_msec *tm)
{
    size_t n;

    /* Visual Studio 2013's behavior is to crash when 0 is passed as the
     * second argument to strftime. */
    n = max ? strftime(s, max, format, &tm->tm) : 0;
    if (n) {
        char decimals[4];
        char *p;

        sprintf(decimals, "%03d", tm->msec);
        for (p = strchr(s, '#'); p; p = strchr(p, '#')) {
            char *d = decimals;
            while (*p == '#') {
                *p++ = *d ? *d++ : '0';
            }
        }
    }

    return n;
}
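
/* For example, with tm->msec == 7, the format "%H:%M:%S.###" yields something
 * like "13:45:02.007", and "%S.#####" yields "02.00700": extra '#'s beyond the
 * three stored decimals are filled with '0'. */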

struct tm_msec *
localtime_msec(long long int now, struct tm_msec *result)
{
    time_t now_sec = now / 1000;
    localtime_r(&now_sec, &result->tm);
    result->msec = now % 1000;
    return result;
}

struct tm_msec *
gmtime_msec(long long int now, struct tm_msec *result)
{
    time_t now_sec = now / 1000;
    gmtime_r(&now_sec, &result->tm);
    result->msec = now % 1000;
    return result;
}