/* This file is part of the CivetWeb web server.
 * See https://github.com/civetweb/civetweb/
 * (C) 2014-2017 by the CivetWeb authors, MIT license.
 */

#if !defined(MAX_TIMERS)
#define MAX_TIMERS MAX_WORKER_THREADS
#endif

typedef int (*taction)(void *arg);

struct ttimer {
	double time;
	double period;
	taction action;
	void *arg;
};

struct ttimers {
	pthread_t threadid;               /* Timer thread ID */
	pthread_mutex_t mutex;            /* Protects timer lists */
	struct ttimer timers[MAX_TIMERS]; /* List of timers */
	unsigned timer_count;             /* Current size of timer list */
};


TIMER_API double
timer_getcurrenttime(void)
{
#if defined(_WIN32)
	/* GetTickCount returns milliseconds since system start as an
	 * unsigned 32 bit value. It will wrap around every 49.7 days.
	 * We need a 64 bit counter (which will wrap in about 500 million
	 * years), built by adding the 32 bit difference since the last
	 * call to a 64 bit counter. This algorithm only works if this
	 * function is called at least once every 7 weeks. */
	static DWORD last_tick;
	static uint64_t now_tick64;

	DWORD now_tick = GetTickCount();

	now_tick64 += ((DWORD)(now_tick - last_tick));
	last_tick = now_tick;
	return (double)now_tick64 * 1.0E-3;
#else
	struct timespec now_ts;

	clock_gettime(CLOCK_MONOTONIC, &now_ts);
	return (double)now_ts.tv_sec + (double)now_ts.tv_nsec * 1.0E-9;
#endif
}


TIMER_API int
timer_add(struct mg_context *ctx,
          double next_time,
          double period,
          int is_relative,
          taction action,
          void *arg)
{
	unsigned u, v;
	int error = 0;
	double now;

	if (ctx->stop_flag) {
		return 0;
	}

	now = timer_getcurrenttime();

	/* HCP24: if is_relative = 0 and next_time < now, the action
	 * would be called as fast as possible; if, in addition, period > 0,
	 * it would be called as fast as possible n times, until
	 * (next_time + (n * period)) > now, and only then at the
	 * requested period.
	 * Solution:
	 * if next_time < now, set next_time = now.
	 * The first callback fires as soon as possible (now),
	 * but the following callbacks respect the period.
	 */
	if (is_relative) {
		next_time += now;
	}

	/* You cannot set timers in the past */
	if (next_time < now) {
		next_time = now;
	}

	pthread_mutex_lock(&ctx->timers->mutex);
	if (ctx->timers->timer_count == MAX_TIMERS) {
		error = 1;
	} else {
		/* Insert new timer into a sorted list. */
		/* The linear list is still most efficient for short lists (small
		 * number of timers) - if there are many timers, different
		 * algorithms will work better. */
		for (u = 0; u < ctx->timers->timer_count; u++) {
			if (ctx->timers->timers[u].time > next_time) {
				/* HCP24: moving all timers > next_time */
				for (v = ctx->timers->timer_count; v > u; v--) {
					ctx->timers->timers[v] = ctx->timers->timers[v - 1];
				}
				break;
			}
		}
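		/* If no timer with a later expiry was found, u equals
		 * timer_count here and the new timer is appended at the
		 * end of the list. */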
		ctx->timers->timers[u].time = next_time;
		ctx->timers->timers[u].period = period;
		ctx->timers->timers[u].action = action;
		ctx->timers->timers[u].arg = arg;
		ctx->timers->timer_count++;
	}
	pthread_mutex_unlock(&ctx->timers->mutex);
	return error;
}


static void
timer_thread_run(void *thread_func_param)
{
	struct mg_context *ctx = (struct mg_context *)thread_func_param;
	double d;
	unsigned u;
	int re_schedule;
	struct ttimer t;

	mg_set_thread_name("timer");

	if (ctx->callbacks.init_thread) {
		/* Timer thread */
		ctx->callbacks.init_thread(ctx, 2);
	}

	d = timer_getcurrenttime();

	while (ctx->stop_flag == 0) {
		pthread_mutex_lock(&ctx->timers->mutex);
		if ((ctx->timers->timer_count > 0)
		    && (d >= ctx->timers->timers[0].time)) {
			t = ctx->timers->timers[0];
			for (u = 1; u < ctx->timers->timer_count; u++) {
				ctx->timers->timers[u - 1] = ctx->timers->timers[u];
			}
			ctx->timers->timer_count--;
			pthread_mutex_unlock(&ctx->timers->mutex);
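			/* Run the action without holding the timer mutex, so the
			 * callback can call timer_add() without deadlocking. */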
			re_schedule = t.action(t.arg);
			if (re_schedule && (t.period > 0)) {
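				/* Re-arm relative to the scheduled time (t.time + t.period),
				 * not the current time, so periodic timers do not drift. */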
				timer_add(ctx, t.time + t.period, t.period, 0, t.action, t.arg);
			}
			continue;
		} else {
			pthread_mutex_unlock(&ctx->timers->mutex);
		}

/* 10 ms seems reasonable.
 * A faster loop (smaller sleep value) increases CPU load,
 * a slower loop (higher sleep value) decreases timer accuracy.
 */
#ifdef _WIN32
		Sleep(10);
#else
		usleep(10000);
#endif

		d = timer_getcurrenttime();
	}

	pthread_mutex_lock(&ctx->timers->mutex);
	ctx->timers->timer_count = 0;
	pthread_mutex_unlock(&ctx->timers->mutex);
}


#ifdef _WIN32
static unsigned __stdcall timer_thread(void *thread_func_param)
{
	timer_thread_run(thread_func_param);
	return 0;
}
#else
static void *
timer_thread(void *thread_func_param)
{
	timer_thread_run(thread_func_param);
	return NULL;
}
#endif /* _WIN32 */


TIMER_API int
timers_init(struct mg_context *ctx)
{
	ctx->timers =
	    (struct ttimers *)mg_calloc_ctx(sizeof(struct ttimers), 1, ctx);
	(void)pthread_mutex_init(&ctx->timers->mutex, NULL);

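	/* Prime the static tick counters in timer_getcurrenttime()
	 * (relevant for the GetTickCount based Windows implementation) */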
	(void)timer_getcurrenttime();

	/* Start timer thread */
	mg_start_thread_with_id(timer_thread, ctx, &ctx->timers->threadid);

	return 0;
}


TIMER_API void
timers_exit(struct mg_context *ctx)
{
	if (ctx->timers) {
		pthread_mutex_lock(&ctx->timers->mutex);
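		/* Drop all pending timers so no further actions fire while the
		 * timer thread shuts down. */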
		ctx->timers->timer_count = 0;

		mg_join_thread(ctx->timers->threadid);

		/* TODO: Do we really need to unlock the mutex before
		 * destroying it, if it is destroyed by the thread currently
		 * owning the mutex? */
		pthread_mutex_unlock(&ctx->timers->mutex);
		(void)pthread_mutex_destroy(&ctx->timers->mutex);
		mg_free(ctx->timers);
	}
}


/* End of timer.inl */