/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

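/*
 * fdt->fd and fdt->open_fds may come from either kmalloc() or vmalloc();
 * kvfree() picks the matching free routine for each.
 */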
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

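/*
 * full_fds_bits is a second-level bitmap with one bit per word of
 * open_fds; BITBIT_NR(nr) is the number of longs needed to hold those
 * per-word bits, and BITBIT_SIZE(nr) is the same in bytes.
 */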
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

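/*
 * The three bitmaps share a single allocation: open_fds and
 * close_on_exec take nr bits each, followed by the full_fds_bits
 * summary bitmap.  The allocation is at least L1_CACHE_BYTES long.
 */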
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

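/*
 * Besides setting the fd's own bit, mark the containing word in
 * full_fds_bits once every bit in that word of open_fds is set, so that
 * find_next_fd() can skip fully-populated words.
 */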
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

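/*
 * Return an upper bound on the number of open fds: one past the highest
 * set bit in open_fds, rounded up to a BITS_PER_LONG boundary so that
 * whole bitmap words can be copied.
 */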
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

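		/* alloc_fdtable() always rounds up past its argument, so
		 * open_files - 1 yields a table with room for at least
		 * open_files slots (unless clamped by sysctl_nr_open,
		 * which is checked just below).
		 */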
		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * the table may have grown while the lock was dropped, so
		 * we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

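/*
 * Drop a reference to the files_struct; on the final put, close every
 * open file, free the fdtable if it isn't the embedded one, and free
 * the structure itself.
 */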
void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

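/*
 * Two-level search for a free slot: consult full_fds_bits first to skip
 * words of open_fds that are completely allocated, then scan open_fds
 * itself from the first word known to have a free bit.
 */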
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we let dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) by whoever gave it to you, or really
 * bad things will happen.  Normally you want to use fd_install()
 * instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	might_sleep();
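	/* rcu_read_lock_sched() pairs with the synchronize_sched() call in
	 * expand_fdtable(): a resizer waits until every installer has left
	 * this section or has observed resize_in_progress.
	 */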
	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

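/*
 * Close all close-on-exec fds.  The file_lock is dropped around each
 * filp_close(), so the fdtable pointer is re-fetched on every pass of
 * the outer loop rather than cached across the unlock.
 */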
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

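/*
 * RCU-protected lookup.  The struct file may be freed and reused
 * concurrently, so get_file_rcu() can fail to take a reference; in that
 * case we retry the table lookup to observe either the replacement file
 * or NULL, which preserves the dup2() atomicity guarantee.
 */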
static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			/* The file object ref couldn't be taken;
			 * loop to catch the new file (or NULL).
			 */
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fdput() before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close() on the returned struct file * in between
 *    calls to fdget() and fdput().
 * 3) You must not clone the current task in between the calls to fdget()
 *    and fdput().
 *
 * The FDPUT_FPUT bit in the word returned by __fget_light() tells fdput()
 * whether a reference was actually taken.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

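/*
 * The low two bits of the word returned by __fdget() carry FDPUT_FPUT
 * and FDPUT_POS_UNLOCK; masking them off recovers the struct file
 * pointer itself.
 */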
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process.  In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
		   struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but
	 * still not finished descriptor.  NB: OpenBSD avoids that at the
	 * price of extra work in their equivalent of fget() - they insert
	 * the struct file immediately after grabbing a descriptor, mark it
	 * larval if more work (e.g. actual opening) is needed and make sure
	 * that fget() treats larval files as absent.  Potentially
	 * interesting, but while extra work in fget() is trivial, the
	 * locking implications and amount of surgery on open()-related
	 * paths in VFS are not.  FreeBSD fails with -EBADF in the same
	 * situation, NetBSD's "solution" deadlocks in rather amusing ways,
	 * AFAICS.  All of that is out of scope of POSIX or SUS, since
	 * neither considers shared descriptor tables and this condition
	 * does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

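/*
 * Back-end for F_DUPFD/F_DUPFD_CLOEXEC: POSIX specifies EINVAL (rather
 * than EBADF) when the requested starting descriptor is at or beyond
 * the RLIMIT_NOFILE limit.
 */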
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
	       int (*f)(const void *, struct file *, unsigned),
	       const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);