/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "poll-loop.h"
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include "coverage.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "list.h"
#include "ovs-thread.h"
#include "seq.h"
#include "socket-util.h"
#include "timeval.h"
#include "vlog.h"
#include "hmap.h"
#include "hash.h"

VLOG_DEFINE_THIS_MODULE(poll_loop);

COVERAGE_DEFINE(poll_fd_wait);
COVERAGE_DEFINE(poll_zero_timeout);

struct poll_node {
    struct hmap_node hmap_node;
    struct pollfd pollfd;       /* Events to pass to time_poll(). */
    HANDLE wevent;              /* Events for WaitForMultipleObjects(). */
    const char *where;          /* Where poll_node was created. */
};

struct poll_loop {
    /* All active poll waiters. */
    struct hmap poll_nodes;

    /* Time at which to wake up the next call to poll_block(), LLONG_MIN to
     * wake up immediately, or LLONG_MAX to wait forever. */
    long long int timeout_when; /* In msecs as returned by time_msec(). */
    const char *timeout_where;  /* Where 'timeout_when' was set. */
};

static struct poll_loop *poll_loop(void);
/* Looks up the node with the same fd and wevent. */
static struct poll_node *
find_poll_node(struct poll_loop *loop, int fd, uint32_t wevent)
{
    struct poll_node *node;

    HMAP_FOR_EACH_WITH_HASH (node, hmap_node, hash_2words(fd, wevent),
                             &loop->poll_nodes) {
        if (node->pollfd.fd == fd && node->wevent == wevent) {
            return node;
        }
    }
    return NULL;
}

/* On Unix-based systems:
 *
 *     Registers 'fd' as waiting for the specified 'events' (which should be
 *     POLLIN or POLLOUT or POLLIN | POLLOUT).  The following call to
 *     poll_block() will wake up when 'fd' becomes ready for one or more of
 *     the requested events.  The 'fd's are later passed to the poll()
 *     function.
 *
 * On Windows systems:
 *
 *     If both a 'wevent' handle and an 'fd' are specified, associates the
 *     'fd' with that 'wevent' for 'events' (implemented in poll_block()).
 *     If no 'fd' is specified, wakes up on any event on that 'wevent'.
 *     These wevents are later passed to WaitForMultipleObjects() to be
 *     polled.
 *
 * The event registration is one-shot: only the following call to
 * poll_block() is affected.  The event will need to be re-registered after
 * poll_block() is called if it is to persist.
 *
 * ('where' is used in debug logging.  Commonly one would use poll_fd_wait()
 * to automatically provide the caller's source file and line number for
 * 'where'.) */
void
poll_fd_wait_at(int fd, HANDLE wevent, short int events, const char *where)
{
    struct poll_loop *loop = poll_loop();
    struct poll_node *node;

    COVERAGE_INC(poll_fd_wait);

#ifdef _WIN32
    /* Null event cannot be polled. */
    if (wevent == 0) {
        VLOG_ERR("No event to wait for fd %d", fd);
        return;
    }
#else
    wevent = 0;
#endif

    /* Check for a duplicate.  If found, "or" the events. */
    node = find_poll_node(loop, fd, wevent);
    if (node) {
        node->pollfd.events |= events;
    } else {
        node = xzalloc(sizeof *node);
        hmap_insert(&loop->poll_nodes, &node->hmap_node,
                    hash_2words(fd, wevent));
        node->pollfd.fd = fd;
        node->pollfd.events = events;
        node->wevent = wevent;
        node->where = where;
    }
}
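
/* Example (illustrative sketch, not part of this file): a typical caller
 * registers interest in an fd and then blocks.  'sock_fd' and the wrapper
 * function below are hypothetical; the point is the poll_fd_wait() +
 * poll_block() pairing, where poll_fd_wait() is the convenience macro that
 * supplies 'where' automatically.
 *
 *     static void
 *     wait_until_readable(int sock_fd)
 *     {
 *         poll_fd_wait(sock_fd, POLLIN);
 *         poll_block();
 *     }
 *
 * Because the registration is one-shot, a caller that still cares about
 * 'sock_fd' must call poll_fd_wait() again before the next poll_block(). */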

/* Causes the following call to poll_block() to block for no more than 'msec'
 * milliseconds.  If 'msec' is nonpositive, the following call to poll_block()
 * will not block at all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * ('where' is used in debug logging.  Commonly one would use poll_timer_wait()
 * to automatically provide the caller's source file and line number for
 * 'where'.) */
void
poll_timer_wait_at(long long int msec, const char *where)
{
    long long int now = time_msec();
    long long int when;

    if (msec <= 0) {
        /* Wake up immediately. */
        when = LLONG_MIN;
    } else if ((unsigned long long int) now + msec <= LLONG_MAX) {
        /* Normal case. */
        when = now + msec;
    } else {
        /* now + msec would overflow. */
        when = LLONG_MAX;
    }

    poll_timer_wait_until_at(when, where);
}
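
/* Example (illustrative sketch, not part of this file): a caller that wants
 * to be woken roughly every second re-registers the timer before each
 * poll_block(), since the registration is one-shot.  The work function below
 * is hypothetical.
 *
 *     for (;;) {
 *         do_periodic_work();
 *         poll_timer_wait(1000);
 *         poll_block();
 *     }
 */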

/* Causes the following call to poll_block() to wake up when the current time,
 * as returned by time_msec(), reaches 'when' or later.  If 'when' is earlier
 * than the current time, the following call to poll_block() will not block at
 * all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * ('where' is used in debug logging.  Commonly one would use
 * poll_timer_wait_until() to automatically provide the caller's source file
 * and line number for 'where'.) */
void
poll_timer_wait_until_at(long long int when, const char *where)
{
    struct poll_loop *loop = poll_loop();
    if (when < loop->timeout_when) {
        loop->timeout_when = when;
        loop->timeout_where = where;
    }
}
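
/* Example (illustrative sketch, not part of this file): unlike
 * poll_timer_wait(), which is relative to "now", an absolute deadline can be
 * computed once and re-registered on each iteration, so the wakeup time does
 * not drift with how long each iteration takes.  'deadline' and the work
 * function are hypothetical.
 *
 *     long long int deadline = time_msec() + 5000;
 *     while (time_msec() < deadline) {
 *         do_some_work();
 *         poll_timer_wait_until(deadline);
 *         poll_block();
 *     }
 */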

/* Causes the following call to poll_block() to wake up immediately, without
 * blocking.
 *
 * ('where' is used in debug logging.  Commonly one would use
 * poll_immediate_wake() to automatically provide the caller's source file and
 * line number for 'where'.) */
void
poll_immediate_wake_at(const char *where)
{
    poll_timer_wait_at(0, where);
}

/* Logs, if appropriate, that the poll loop was awakened by an event
 * registered at 'where' (typically a source file and line number).  The other
 * arguments have two possible interpretations:
 *
 *   - If 'pollfd' is nonnull then it should be the "struct pollfd" that
 *     caused the wakeup.  'timeout' is ignored.
 *
 *   - If 'pollfd' is NULL then 'timeout' is the number of milliseconds after
 *     which the poll loop woke up.
 */
static void
log_wakeup(const char *where, const struct pollfd *pollfd, int timeout)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
    enum vlog_level level;
    int cpu_usage;
    struct ds s;

    cpu_usage = get_cpu_usage();
    if (VLOG_IS_DBG_ENABLED()) {
        level = VLL_DBG;
    } else if (cpu_usage > 50 && !VLOG_DROP_INFO(&rl)) {
        level = VLL_INFO;
    } else {
        return;
    }

    ds_init(&s);
    ds_put_cstr(&s, "wakeup due to ");
    if (pollfd) {
        char *description = describe_fd(pollfd->fd);
        if (pollfd->revents & POLLIN) {
            ds_put_cstr(&s, "[POLLIN]");
        }
        if (pollfd->revents & POLLOUT) {
            ds_put_cstr(&s, "[POLLOUT]");
        }
        if (pollfd->revents & POLLERR) {
            ds_put_cstr(&s, "[POLLERR]");
        }
        if (pollfd->revents & POLLHUP) {
            ds_put_cstr(&s, "[POLLHUP]");
        }
        if (pollfd->revents & POLLNVAL) {
            ds_put_cstr(&s, "[POLLNVAL]");
        }
        ds_put_format(&s, " on fd %d (%s)", pollfd->fd, description);
        free(description);
    } else {
        ds_put_format(&s, "%d-ms timeout", timeout);
    }
    if (where) {
        ds_put_format(&s, " at %s", where);
    }
    if (cpu_usage >= 0) {
        ds_put_format(&s, " (%d%% CPU usage)", cpu_usage);
    }
    VLOG(level, "%s", ds_cstr(&s));
    ds_destroy(&s);
}

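/* Removes all the poll nodes registered with 'loop' and frees them. */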
static void
free_poll_nodes(struct poll_loop *loop)
{
    struct poll_node *node, *next;

    HMAP_FOR_EACH_SAFE (node, next, hmap_node, &loop->poll_nodes) {
        hmap_remove(&loop->poll_nodes, &node->hmap_node);
        free(node);
    }
}

/* Blocks until one or more of the events registered with poll_fd_wait()
 * occurs, or until the minimum duration registered with poll_timer_wait()
 * elapses, or not at all if poll_immediate_wake() has been called. */
void
poll_block(void)
{
    struct poll_loop *loop = poll_loop();
    struct poll_node *node;
    struct pollfd *pollfds;
    HANDLE *wevents = NULL;
    int elapsed;
    int retval;
    int i;

    /* Register fatal signal events before actually doing any real work for
     * poll_block. */
    fatal_signal_wait();

    if (loop->timeout_when == LLONG_MIN) {
        COVERAGE_INC(poll_zero_timeout);
    }

    timewarp_run();
    pollfds = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *pollfds);

#ifdef _WIN32
    wevents = xmalloc(hmap_count(&loop->poll_nodes) * sizeof *wevents);
#endif

    /* Populate with all the fds and events. */
    i = 0;
    HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
        pollfds[i] = node->pollfd;
#ifdef _WIN32
        wevents[i] = node->wevent;
        if (node->pollfd.fd && node->wevent) {
            short int wsa_events = 0;
            if (node->pollfd.events & POLLIN) {
                wsa_events |= FD_READ | FD_ACCEPT | FD_CLOSE;
            }
            if (node->pollfd.events & POLLOUT) {
                wsa_events |= FD_WRITE | FD_CONNECT | FD_CLOSE;
            }
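            /* Ask Winsock to signal 'wevent' when any of the requested
             * network events occurs on this socket, so that the wait on the
             * 'wevents' passed to time_poll() below wakes up. */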
            WSAEventSelect(node->pollfd.fd, node->wevent, wsa_events);
        }
#endif
        i++;
    }

    retval = time_poll(pollfds, hmap_count(&loop->poll_nodes), wevents,
                       loop->timeout_when, &elapsed);
    if (retval < 0) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "poll: %s", ovs_strerror(-retval));
    } else if (!retval) {
        log_wakeup(loop->timeout_where, NULL, elapsed);
    } else if (get_cpu_usage() > 50 || VLOG_IS_DBG_ENABLED()) {
        i = 0;
        HMAP_FOR_EACH (node, hmap_node, &loop->poll_nodes) {
            if (pollfds[i].revents) {
                log_wakeup(node->where, &pollfds[i], 0);
            }
            i++;
        }
    }

    free_poll_nodes(loop);
    loop->timeout_when = LLONG_MAX;
    loop->timeout_where = NULL;
    free(pollfds);
    free(wevents);

    /* Handle any pending signals before doing anything else. */
    fatal_signal_run();

    seq_woke();
}
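
/* Example (illustrative sketch, not part of this file): poll_block() is the
 * blocking point of a run/wait style main loop.  Each module's *_run()
 * function does work, each *_wait() function re-registers the events it
 * cares about, and poll_block() sleeps until one of them fires.  The module
 * names below are hypothetical.
 *
 *     for (;;) {
 *         some_module_run();
 *         another_module_run();
 *
 *         some_module_wait();
 *         another_module_wait();
 *         poll_block();
 *     }
 */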

static void
free_poll_loop(void *loop_)
{
    struct poll_loop *loop = loop_;

    free_poll_nodes(loop);
    hmap_destroy(&loop->poll_nodes);
    free(loop);
}

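/* Returns the poll_loop for the current thread, creating it (and the
 * thread-specific key used to store it) on first use.  Each thread thus gets
 * its own set of poll nodes and its own timeout. */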
static struct poll_loop *
poll_loop(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static pthread_key_t key;
    struct poll_loop *loop;

    if (ovsthread_once_start(&once)) {
        xpthread_key_create(&key, free_poll_loop);
        ovsthread_once_done(&once);
    }

    loop = pthread_getspecific(key);
    if (!loop) {
        loop = xzalloc(sizeof *loop);
        hmap_init(&loop->poll_nodes);
        xpthread_setspecific(key, loop);
    }
    return loop;
}