// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

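/*
 * Worked example (illustrative sketch, assuming 64-bit longs): a 256-fd
 * table needs BITS_TO_LONGS(256) = 4 longs of open_fds; the second-level
 * summary needs BITS_TO_LONGS(4) = 1 long, so BITBIT_NR(256) = 1 and
 * BITBIT_SIZE(256) = 8 bytes - one summary bit per word of the primary
 * bitmap.
 */
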
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

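/*
 * Worked example (illustrative sketch, 64-bit longs): with count == 128 and
 * nfdt->max_fds == 512, each plain bitmap copies 128/8 == 16 bytes and
 * zeroes (512 - 128)/8 == 48 bytes, while the summary bitmap copies
 * BITBIT_SIZE(128) == 8 bytes and zeroes BITBIT_SIZE(512) - 8 == 0 bytes,
 * both sizes rounding up to a whole long.
 */
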
/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

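/*
 * Worked example of the sizing above (illustrative sketch, 64-bit pointers,
 * so 1024 / sizeof(struct file *) == 128): a request of nr == 300 becomes
 * 300/128 == 2, roundup_pow_of_two(2 + 1) == 4, then 4 * 128 == 512 slots,
 * i.e. a 4096-byte fd array - the next page-tuned power-of-two step.
 */
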
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

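/*
 * Example of the summary bitmap in action (illustrative sketch, 64-bit
 * longs): once fds 0-63 are all open, open_fds[0] == ~0UL, so
 * __set_open_fd() sets bit 0 of full_fds_bits and find_next_fd() can skip
 * that word wholesale; closing any fd in the word makes __clear_open_fd()
 * clear the summary bit again.
 */
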
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	max_fds = ALIGN(max_fds, BITS_PER_LONG);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return min(count, max_fds);
}

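/*
 * Example (illustrative sketch, 64-bit longs): with only fds 0-2 open,
 * count_open_files() reports one word's worth, i.e. 64. A dup_fd() caller
 * passing max_fds == 100 then gets min(64, ALIGN(100, 64)) == 64 - already
 * the BITS_PER_LONG multiple the comment above demands.
 */
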
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

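/*
 * Example (illustrative sketch, 64-bit longs): if fds 0-63 are all busy and
 * fd 64 is free, full_fds_bits has bit 0 set, so the first
 * find_next_zero_bit() lands on summary bit 1 and bumps start to 64; the
 * second scan then begins at open_fds[1] instead of re-walking the
 * exhausted first word.
 */
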
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

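/*
 * Typical caller pattern (illustrative sketch, not code from this file;
 * my_open_helper() is a hypothetical stand-in for whatever produces the
 * struct file): reserve a slot first, publish the file with fd_install()
 * only once nothing can fail, and give the slot back on error:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = my_open_helper();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 *
 * fd_install() consumes the file reference, so there is no fput() on the
 * success path.
 */
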
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * If this function returns an EINVAL error pointer the fd was beyond the
 * current maximum number of file descriptors for that fdtable.
 *
 * Returns: The file associated with @fd, on error returns an error pointer.
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds) {
		file = ERR_PTR(-EINVAL);
		goto out_unlock;
	}
	file = files_lookup_fd_locked(files, fd);
	if (!file) {
		file = ERR_PTR(-EBADF);
		goto out_unlock;
	}
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);

out_unlock:
	spin_unlock(&files->file_lock);
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	file = pick_file(files, fd);
	if (IS_ERR(file))
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: file descriptor table
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
				 unsigned int max_fd)
{
	while (fd <= max_fd) {
		struct file *file;

		file = pick_file(cur_fds, fd++);
		if (!IS_ERR(file)) {
			/* found a valid file to close */
			filp_close(file, cur_fds);
			cond_resched();
			continue;
		}

		/* beyond the last fd in that table */
		if (PTR_ERR(file) == -EINVAL)
			return;
	}
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

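/*
 * Userspace view (illustrative sketch, not code from this file): the
 * close_range(2) syscall wired up to this helper closes an inclusive range:
 *
 *	close_range(3, ~0U, 0);
 *	close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
 *
 * The first call closes every fd from 3 up; the second only marks the same
 * range close-on-exec. With CLOSE_RANGE_UNSHARE the table is unshared
 * first, so a sibling thread sharing the old table keeps its descriptors.
 */
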
/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_err;
	file = fdt->fd[fd];
	if (!file)
		goto out_err;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	get_file(file);
	*res = file;
	return 0;
out_err:
	*res = NULL;
	return -ENOENT;
}
EXPORT_SYMBOL(close_fd_get_file);

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file,
 * followed by an fput().
 */
int close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	int ret;

	spin_lock(&files->file_lock);
	ret = __close_fd_get_file(fd, res);
	spin_unlock(&files->file_lock);

	return ret;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask, unsigned int refs)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;

		if (unlikely(fd >= fdt->max_fds))
			return NULL;

		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
		file = rcu_dereference_raw(*fdentry);
		if (unlikely(!file))
			return NULL;

		if (unlikely(file->f_mode & mask))
			return NULL;

		/*
		 * Ok, we have a file pointer. However, because we do
		 * this all locklessly under RCU, we may be racing with
		 * that file being closed.
		 *
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero,
		 *      and get_file_rcu_many() fails. Just try
		 *      again:
		 */
		if (unlikely(!get_file_rcu_many(file, refs)))
			continue;

		/*
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our refs and try again.
		 */
		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
			fput_many(file, refs);
			continue;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask, unsigned int refs)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask, refs);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
				  unsigned int refs)
{
	return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0, 1);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

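/*
 * Typical consumer (illustrative sketch, not code from this file): the
 * fdget()/fdput() helpers in <linux/file.h> wrap the value returned here,
 * letting a syscall body skip refcounting in the single-threaded case;
 * do_something() is a hypothetical stand-in:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = do_something(f.file);
 *	fdput(f);
 *	return ret;
 *
 * fdput() only calls fput() when FDPUT_FPUT was set in the word returned
 * by __fget_light().
 */
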
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
	return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	unsigned long nofile = rlimit(RLIMIT_NOFILE);

	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
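
/*
 * Usage sketch (illustrative, not code from this file): a callback that
 * returns non-zero stops the walk, and that value becomes iterate_fd()'s
 * return value, so a caller can scan a table for a matching file:
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	n = iterate_fd(files, 0, match_file, file_to_find);
 *
 * The "fd + 1" keeps fd 0 distinguishable from the "not found" result 0.
 */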