/* Source: Open vSwitch (mirror_ovs.git), lib/poll-loop.c */
1 /*
2 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <config.h>
#include "poll-loop.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <poll.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include "backtrace.h"
#include "coverage.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "list.h"
#include "timeval.h"
#include "vlog.h"
32
33 VLOG_DEFINE_THIS_MODULE(poll_loop)
34
35 /* An event that will wake the following call to poll_block(). */
/* An event that will wake the following call to poll_block().
 *
 * One waiter is created per poll_fd_wait() call and destroyed (freed) by
 * poll_cancel(), either explicitly by the client or implicitly at the end of
 * the next poll_block(). */
struct poll_waiter {
    /* Set when the waiter is created. */
    struct list node;           /* Element in global waiters list. */
    int fd;                     /* File descriptor. */
    short int events;           /* Events to wait for (POLLIN, POLLOUT). */
    struct backtrace *backtrace; /* Optionally, event that created waiter.
                                  * Nonnull only when debug logging was
                                  * enabled at registration time. */

    /* Set only when poll_block() is called.  Points into poll_block()'s
     * static pollfds[] array, so it is only valid during that call. */
    struct pollfd *pollfd;      /* Pointer to element of the pollfds array. */
};
46
/* All active poll waiters, in registration order. */
static struct list waiters = LIST_INITIALIZER(&waiters);

/* Number of elements in the waiters list.  Kept in sync by new_waiter() and
 * poll_cancel(); used by poll_block() to size its pollfd array. */
static size_t n_waiters;

/* Max time to wait in next call to poll_block(), in milliseconds, or -1 to
 * wait forever.  Reset to -1 at the end of each poll_block() call, so timer
 * registrations are one-shot. */
static int timeout = -1;

/* Backtrace of 'timeout''s registration, if debugging is enabled.  Used only
 * to log which caller's timer caused a wakeup. */
static struct backtrace timeout_backtrace;

static struct poll_waiter *new_waiter(int fd, short int events);
61
/* Registers 'fd' as waiting for the specified 'events' (which should be POLLIN
 * or POLLOUT or POLLIN | POLLOUT).  The following call to poll_block() will
 * wake up when 'fd' becomes ready for one or more of the requested events.
 *
 * The event registration is one-shot: only the following call to poll_block()
 * is affected.  The event will need to be re-registered after poll_block() is
 * called if it is to persist.
 *
 * Returns the new waiter, which the caller may pass to poll_cancel() to
 * withdraw the registration early. */
struct poll_waiter *
poll_fd_wait(int fd, short int events)
{
    struct poll_waiter *waiter;

    COVERAGE_INC(poll_fd_wait);
    waiter = new_waiter(fd, events);
    return waiter;
}
75
76 /* The caller must ensure that 'msec' is not negative. */
77 static void
78 poll_timer_wait__(int msec)
79 {
80 if (timeout < 0 || msec < timeout) {
81 timeout = msec;
82 if (VLOG_IS_DBG_ENABLED()) {
83 backtrace_capture(&timeout_backtrace);
84 }
85 }
86 }
87
/* Causes the following call to poll_block() to block for no more than 'msec'
 * milliseconds.  If 'msec' is nonpositive, the following call to poll_block()
 * will not block at all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist. */
void
poll_timer_wait(long long int msec)
{
    int clamped;

    /* Clamp into the int range that poll_timer_wait__() requires. */
    if (msec < 0) {
        clamped = 0;
    } else if (msec > INT_MAX) {
        clamped = INT_MAX;
    } else {
        clamped = msec;
    }
    poll_timer_wait__(clamped);
}
102
/* Causes the following call to poll_block() to wake up when the current time,
 * as returned by time_msec(), reaches 'msec' or later.  If 'msec' is earlier
 * than the current time, the following call to poll_block() will not block at
 * all.
 *
 * The timer registration is one-shot: only the following call to poll_block()
 * is affected.  The timer will need to be re-registered after poll_block() is
 * called if it is to persist. */
void
poll_timer_wait_until(long long int msec)
{
    long long int now = time_msec();
    long long int delay;

    /* Convert the absolute deadline into a relative delay, clamped to the
     * int range that poll_timer_wait__() requires. */
    if (msec <= now) {
        delay = 0;
    } else if (msec - now >= INT_MAX) {
        delay = INT_MAX;
    } else {
        delay = msec - now;
    }
    poll_timer_wait__(delay);
}
119
/* Causes the following call to poll_block() to wake up immediately, without
 * blocking.  Implemented as a zero-length timer registration, so like all
 * registrations it is one-shot. */
void
poll_immediate_wake(void)
{
    poll_timer_wait(0);
}
127
128 static void PRINTF_FORMAT(2, 3)
129 log_wakeup(const struct backtrace *backtrace, const char *format, ...)
130 {
131 struct ds ds;
132 va_list args;
133
134 ds_init(&ds);
135 va_start(args, format);
136 ds_put_format_valist(&ds, format, args);
137 va_end(args);
138
139 if (backtrace) {
140 int i;
141
142 ds_put_char(&ds, ':');
143 for (i = 0; i < backtrace->n_frames; i++) {
144 ds_put_format(&ds, " 0x%"PRIxPTR, backtrace->frames[i]);
145 }
146 }
147 VLOG_DBG("%s", ds_cstr(&ds));
148 ds_destroy(&ds);
149 }
150
/* Blocks until one or more of the events registered with poll_fd_wait()
 * occurs, or until the minimum duration registered with poll_timer_wait()
 * elapses, or not at all if poll_immediate_wake() has been called.
 *
 * On return, all waiters have been freed and 'timeout' reset, so every fd
 * and timer registration is one-shot: callers re-register each iteration. */
void
poll_block(void)
{
    /* The pollfd array is static and grown monotonically so that it can be
     * reused across calls without reallocating every iteration. */
    static struct pollfd *pollfds;
    static size_t max_pollfds;

    struct poll_waiter *pw, *next;
    int n_pollfds;
    int retval;

    /* Register fatal signal events before actually doing any real work for
     * poll_block. */
    fatal_signal_wait();

    /* Grow the pollfd array if more waiters are registered than ever before. */
    if (max_pollfds < n_waiters) {
        max_pollfds = n_waiters;
        pollfds = xrealloc(pollfds, max_pollfds * sizeof *pollfds);
    }

    /* Fill in one pollfd per waiter and link each waiter to its slot so that
     * the revents can be read back after the poll. */
    n_pollfds = 0;
    LIST_FOR_EACH (pw, struct poll_waiter, node, &waiters) {
        pw->pollfd = &pollfds[n_pollfds];
        pollfds[n_pollfds].fd = pw->fd;
        pollfds[n_pollfds].events = pw->events;
        pollfds[n_pollfds].revents = 0;
        n_pollfds++;
    }

    if (!timeout) {
        COVERAGE_INC(poll_zero_timeout);
    }
    /* time_poll() returns 0 on timeout or a negative errno value on error. */
    retval = time_poll(pollfds, n_pollfds, timeout);
    if (retval < 0) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_ERR_RL(&rl, "poll: %s", strerror(-retval));
    } else if (!retval && VLOG_IS_DBG_ENABLED()) {
        /* Woke up due to the timer, not an fd; log who armed it. */
        log_wakeup(&timeout_backtrace, "%d-ms timeout", timeout);
    }

    /* Log any fd wakeups, then destroy every waiter: registrations are
     * one-shot.  The _SAFE iterator is required because poll_cancel()
     * removes 'pw' from the list. */
    LIST_FOR_EACH_SAFE (pw, next, struct poll_waiter, node, &waiters) {
        if (pw->pollfd->revents && VLOG_IS_DBG_ENABLED()) {
            log_wakeup(pw->backtrace, "%s%s%s%s%s on fd %d",
                       pw->pollfd->revents & POLLIN ? "[POLLIN]" : "",
                       pw->pollfd->revents & POLLOUT ? "[POLLOUT]" : "",
                       pw->pollfd->revents & POLLERR ? "[POLLERR]" : "",
                       pw->pollfd->revents & POLLHUP ? "[POLLHUP]" : "",
                       pw->pollfd->revents & POLLNVAL ? "[POLLNVAL]" : "",
                       pw->fd);
        }
        poll_cancel(pw);
    }

    /* Reset the one-shot timer state for the next iteration. */
    timeout = -1;
    timeout_backtrace.n_frames = 0;

    /* Handle any pending signals before doing anything else. */
    fatal_signal_run();
}
212
213 /* Cancels the file descriptor event registered with poll_fd_wait() using 'pw',
214 * the struct poll_waiter returned by that function.
215 *
216 * An event registered with poll_fd_wait() may be canceled from its time of
217 * registration until the next call to poll_block(). At that point, the event
218 * is automatically canceled by the system and its poll_waiter is freed. */
219 void
220 poll_cancel(struct poll_waiter *pw)
221 {
222 if (pw) {
223 list_remove(&pw->node);
224 free(pw->backtrace);
225 free(pw);
226 n_waiters--;
227 }
228 }
229 \f
230 /* Creates and returns a new poll_waiter for 'fd' and 'events'. */
231 static struct poll_waiter *
232 new_waiter(int fd, short int events)
233 {
234 struct poll_waiter *waiter = xzalloc(sizeof *waiter);
235 assert(fd >= 0);
236 waiter->fd = fd;
237 waiter->events = events;
238 if (VLOG_IS_DBG_ENABLED()) {
239 waiter->backtrace = xmalloc(sizeof *waiter->backtrace);
240 backtrace_capture(waiter->backtrace);
241 }
242 list_push_back(&waiters, &waiter->node);
243 n_waiters++;
244 return waiter;
245 }