/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > 100 * NSEC_PER_MSEC)
		slack = 100 * NSEC_PER_MSEC;

	if (slack < 0)
		slack = 0;
	return slack;
}
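
/*
 * A concrete reading of the above: a 10 second timeout gives a normal
 * task 10s/1000 = 10 msec of slack and a "nice" task (divfactor 200)
 * 50 msec; the 100 msec cap takes over once the timeout exceeds
 * 100 seconds (20 seconds for nice tasks).
 */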

static long estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
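
/*
 * A poll_table_page fills exactly one page: ->entry points at the first
 * free slot, and POLL_TABLE_FULL() checks whether bumping it by one more
 * entry would run past the end of that page.
 */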

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
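
/*
 * Note the two-level allocation strategy above: the first
 * N_INLINE_POLL_ENTRIES waiters live directly inside struct poll_wqueues
 * (typically on the caller's stack), so small select/poll calls never
 * touch the page allocator; only after those run out do we chain
 * GFP_KERNEL pages through p->table.
 */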

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions. The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync. @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}
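
/*
 * pollwake() is the callback hung off every waited-on queue. The waker
 * passes the events it is signalling in @key; if none of them intersect
 * the events this entry cares about (entry->key, filled in by
 * wait_key_set() or do_pollfd()), the wakeup is filtered out and the
 * polling task keeps sleeping. A NULL @key wakes unconditionally.
 */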

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	entry->key = p->key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes. First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration. Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
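
/*
 * Callers (do_select() and do_poll() below) treat a zero return as
 * "the timeout expired". Any non-zero return means we were woken
 * early, either because pwq->triggered was already set or because the
 * sleep was interrupted, and the fd scan is simply repeated.
 */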

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
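
/*
 * For example, sys_poll() below turns a relative 5300 msec timeout into
 * poll_select_set_timeout(&end_time, 5, 300 * NSEC_PER_MSEC), i.e. an
 * absolute monotonic expiry of "now + 5.3s".
 */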

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
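
/*
 * The fd_set_bits bitmaps are arrays of longs: bit (fd % __NFDBITS) of
 * word (fd / __NFDBITS) stands for descriptor fd. BITS() ORs the three
 * input words at one long-word position together, so a zero result
 * means no fd in that word is being watched at all.
 */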

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (__NFDBITS-1)));
	n /= __NFDBITS;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds->fds_bits+n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * __NFDBITS;
	}

	return max;
}
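
/*
 * max_select_fd() therefore returns the number of fds actually worth
 * scanning: one more than the highest descriptor set in any input
 * bitmap, after verifying that every set bit refers to an open file
 * (-EBADF otherwise, as POSIX requires for select()).
 */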

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
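
/*
 * These sets translate select()'s three fd_sets into poll() event
 * masks: readability also triggers on hangup or error, writability
 * also triggers on error, and "exceptional" means priority data only.
 */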

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	if (wait) {
		wait->key = POLLEX_SET;
		if (in & bit)
			wait->key |= POLLIN_SET;
		if (out & bit)
			wait->key |= POLLOUT_SET;
	}
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(file, wait);
					}
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)

int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
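/*
 * Viewed from userspace the sixth argument is, in effect (field names
 * here are only illustrative):
 *
 *	struct {
 *		const sigset_t *ss;	pointer to the signal mask
 *		size_t ss_len;		sizeof(sigset_t)
 *	};
 *
 * which is what the two __get_user() calls below unpack.
 */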
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
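
/*
 * With the common 4 KiB page and the 8 byte struct pollfd this works
 * out to roughly 510 pollfds per chained allocation, once the
 * poll_list header is taken off the front of the page.
 */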

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll) {
				if (pwait)
					pwait->key = pollfd->events |
							POLLERR | POLLHUP;
				mask = file->f_op->poll(file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill the poll_table, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table to them on the next loop iteration.
		 */
		pt = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))
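
/*
 * N_STACK_PPS is how many pollfds fit in the on-stack buffer below;
 * with the usual 256 byte POLL_STACK_ALLOC that is about 30 entries,
 * so small poll() calls avoid kmalloc() entirely.
 */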

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		long, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */