/* Provenance: CivetWeb src/timer.inl, vendored via
 * ceph -> jaegertracing/opentelemetry-cpp -> prometheus-cpp -> civetweb. */
1 /* This file is part of the CivetWeb web server.
2 * See https://github.com/civetweb/civetweb/
3 * (C) 2014-2021 by the CivetWeb authors, MIT license.
4 */
5
/* Maximum number of timers that may be scheduled at once; defaults to
 * the worker-thread limit of the embedding server. */
#if !defined(MAX_TIMERS)
#define MAX_TIMERS MAX_WORKER_THREADS
#endif
#if !defined(TIMER_RESOLUTION)
/* Timer resolution in ms */
#define TIMER_RESOLUTION (10)
#endif
13
/* Timer action callback: a nonzero return value re-arms a periodic
 * timer; 0 stops it (see timer_thread_run). */
typedef int (*taction)(void *arg);
/* Cleanup callback, invoked when a timer will not fire again, so the
 * user can release "arg". */
typedef void (*tcancelaction)(void *arg);

/* One scheduled timer entry. */
struct ttimer {
	double time;          /* Absolute deadline, seconds (timer_getcurrenttime scale) */
	double period;        /* Re-arm interval in seconds; <= 0 means one-shot */
	taction action;       /* Callback to invoke when the deadline passes */
	void *arg;            /* Opaque user argument passed to action/cancel */
	tcancelaction cancel; /* Optional cleanup callback; may be NULL */
};
24
/* Per-context timer state. The "timers" array is kept sorted by
 * ascending deadline (timer_add inserts in order). */
struct ttimers {
	pthread_t threadid; /* Timer thread ID */
	pthread_mutex_t mutex; /* Protects timer lists */
	struct ttimer *timers; /* List of timers */
	unsigned timer_count; /* Current size of timer list */
	unsigned timer_capacity; /* Capacity of timer list */
#if defined(_WIN32)
	DWORD last_tick;     /* GetTickCount() value seen at the previous query */
	uint64_t now_tick64; /* Milliseconds accumulated across 32-bit wraps */
#endif
};
36
37
38 TIMER_API double
39 timer_getcurrenttime(struct mg_context *ctx)
40 {
41 #if defined(_WIN32)
42 /* GetTickCount returns milliseconds since system start as
43 * unsigned 32 bit value. It will wrap around every 49.7 days.
44 * We need to use a 64 bit counter (will wrap in 500 mio. years),
45 * by adding the 32 bit difference since the last call to a
46 * 64 bit counter. This algorithm will only work, if this
47 * function is called at least once every 7 weeks. */
48 uint64_t now_tick64 = 0;
49 DWORD now_tick = GetTickCount();
50
51 if (ctx->timers) {
52 pthread_mutex_lock(&ctx->timers->mutex);
53 ctx->timers->now_tick64 += now_tick - ctx->timers->last_tick;
54 now_tick64 = ctx->timers->now_tick64;
55 ctx->timers->last_tick = now_tick;
56 pthread_mutex_unlock(&ctx->timers->mutex);
57 }
58 return (double)now_tick64 * 1.0E-3;
59 #else
60 struct timespec now_ts;
61
62 (void)ctx;
63 clock_gettime(CLOCK_MONOTONIC, &now_ts);
64 return (double)now_ts.tv_sec + (double)now_ts.tv_nsec * 1.0E-9;
65 #endif
66 }
67
68
69 TIMER_API int
70 timer_add(struct mg_context *ctx,
71 double next_time,
72 double period,
73 int is_relative,
74 taction action,
75 void *arg,
76 tcancelaction cancel)
77 {
78 int error = 0;
79 double now;
80
81 if (!ctx->timers) {
82 return 1;
83 }
84
85 now = timer_getcurrenttime(ctx);
86
87 /* HCP24: if is_relative = 0 and next_time < now
88 * action will be called so fast as possible
89 * if additional period > 0
90 * action will be called so fast as possible
91 * n times until (next_time + (n * period)) > now
92 * then the period is working
93 * Solution:
94 * if next_time < now then we set next_time = now.
95 * The first callback will be so fast as possible (now)
96 * but the next callback on period
97 */
98 if (is_relative) {
99 next_time += now;
100 }
101
102 /* You can not set timers into the past */
103 if (next_time < now) {
104 next_time = now;
105 }
106
107 pthread_mutex_lock(&ctx->timers->mutex);
108 if (ctx->timers->timer_count == MAX_TIMERS) {
109 error = 1;
110 } else if (ctx->timers->timer_count == ctx->timers->timer_capacity) {
111 unsigned capacity = (ctx->timers->timer_capacity * 2) + 1;
112 struct ttimer *timers =
113 (struct ttimer *)mg_realloc_ctx(ctx->timers->timers,
114 capacity * sizeof(struct ttimer),
115 ctx);
116 if (timers) {
117 ctx->timers->timers = timers;
118 ctx->timers->timer_capacity = capacity;
119 } else {
120 error = 1;
121 }
122 }
123 if (!error) {
124 /* Insert new timer into a sorted list. */
125 /* The linear list is still most efficient for short lists (small
126 * number of timers) - if there are many timers, different
127 * algorithms will work better. */
128 unsigned u = ctx->timers->timer_count;
129 for (; (u > 0) && (ctx->timers->timers[u - 1].time > next_time); u--) {
130 ctx->timers->timers[u] = ctx->timers->timers[u - 1];
131 }
132 ctx->timers->timers[u].time = next_time;
133 ctx->timers->timers[u].period = period;
134 ctx->timers->timers[u].action = action;
135 ctx->timers->timers[u].arg = arg;
136 ctx->timers->timers[u].cancel = cancel;
137 ctx->timers->timer_count++;
138 }
139 pthread_mutex_unlock(&ctx->timers->mutex);
140 return error;
141 }
142
143
/* Body of the timer thread. Pops expired timers off the front of the
 * deadline-sorted list, runs their actions with the mutex released, and
 * reschedules periodic ones. When the context stop flag is raised, the
 * cancel callback of every still-pending timer is invoked so users can
 * release their arguments. */
static void
timer_thread_run(void *thread_func_param)
{
	struct mg_context *ctx = (struct mg_context *)thread_func_param;
	double d;
	unsigned u;
	int action_res;
	struct ttimer t;

	mg_set_thread_name("timer");

	if (ctx->callbacks.init_thread) {
		/* Timer thread */
		ctx->callbacks.init_thread(ctx, 2);
	}

	/* Timer main loop */
	d = timer_getcurrenttime(ctx);
	while (STOP_FLAG_IS_ZERO(&ctx->stop_flag)) {
		pthread_mutex_lock(&ctx->timers->mutex);
		if ((ctx->timers->timer_count > 0)
		    && (d >= ctx->timers->timers[0].time)) {
			/* Timer list is sorted. First action should run now. */
			/* Store active timer in "t" */
			t = ctx->timers->timers[0];

			/* Shift all other timers */
			for (u = 1; u < ctx->timers->timer_count; u++) {
				ctx->timers->timers[u - 1] = ctx->timers->timers[u];
			}
			ctx->timers->timer_count--;

			/* Release the lock before the callback runs: the action may
			 * itself call timer_add, which takes the same mutex. */
			pthread_mutex_unlock(&ctx->timers->mutex);

			/* Call timer action */
			action_res = t.action(t.arg);

			/* action_res == 1: reschedule */
			/* action_res == 0: do not reschedule, free(arg) */
			if ((action_res > 0) && (t.period > 0)) {
				/* Should schedule timer again */
				timer_add(ctx,
				          t.time + t.period,
				          t.period,
				          0,
				          t.action,
				          t.arg,
				          t.cancel);
			} else {
				/* Allow user to free timer argument */
				if (t.cancel != NULL) {
					t.cancel(t.arg);
				}
			}
			/* Re-check immediately: more timers may already be due. */
			continue;
		} else {
			pthread_mutex_unlock(&ctx->timers->mutex);
		}

		/* TIMER_RESOLUTION = 10 ms seems reasonable.
		 * A faster loop (smaller sleep value) increases CPU load,
		 * a slower loop (higher sleep value) decreases timer accuracy.
		 */
		mg_sleep(TIMER_RESOLUTION);

		d = timer_getcurrenttime(ctx);
	}

	/* Remove remaining timers */
	for (u = 0; u < ctx->timers->timer_count; u++) {
		t = ctx->timers->timers[u];
		if (t.cancel != NULL) {
			t.cancel(t.arg);
		}
	}
}
220
221
#if defined(_WIN32)
/* Win32 thread entry point: adapts the _beginthreadex signature to
 * timer_thread_run. */
static unsigned __stdcall timer_thread(void *thread_func_param)
{
	timer_thread_run(thread_func_param);
	return 0;
}
#else
/* POSIX thread entry point: adapts the pthread signature to
 * timer_thread_run. */
static void *
timer_thread(void *thread_func_param)
{
	struct sigaction sa;

	/* Ignore SIGPIPE in this thread, so a write to a closed connection
	 * fails with an error instead of terminating the process. */
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &sa, NULL);

	timer_thread_run(thread_func_param);
	return NULL;
}
#endif /* _WIN32 */
243
244
245 TIMER_API int
246 timers_init(struct mg_context *ctx)
247 {
248 /* Initialize timers data structure */
249 ctx->timers =
250 (struct ttimers *)mg_calloc_ctx(sizeof(struct ttimers), 1, ctx);
251
252 if (!ctx->timers) {
253 return -1;
254 }
255 ctx->timers->timers = NULL;
256
257 /* Initialize mutex */
258 if (0 != pthread_mutex_init(&ctx->timers->mutex, NULL)) {
259 mg_free(ctx->timers);
260 ctx->timers = NULL;
261 return -1;
262 }
263
264 /* For some systems timer_getcurrenttime does some initialization
265 * during the first call. Call it once now, ignore the result. */
266 (void)timer_getcurrenttime(ctx);
267
268 /* Start timer thread */
269 if (mg_start_thread_with_id(timer_thread, ctx, &ctx->timers->threadid)
270 != 0) {
271 (void)pthread_mutex_destroy(&ctx->timers->mutex);
272 mg_free(ctx->timers);
273 ctx->timers = NULL;
274 return -1;
275 }
276
277 return 0;
278 }
279
280
281 TIMER_API void
282 timers_exit(struct mg_context *ctx)
283 {
284 if (ctx->timers) {
285 mg_join_thread(ctx->timers->threadid);
286 (void)pthread_mutex_destroy(&ctx->timers->mutex);
287 mg_free(ctx->timers->timers);
288 mg_free(ctx->timers);
289 ctx->timers = NULL;
290 }
291 }
292
293
294 /* End of timer.inl */