// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
        __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
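
/*
 * These bounds back the fs.nr_open sysctl: sysctl_nr_open is the current
 * per-process ceiling on fd table size, and writes to the sysctl are
 * clamped to [sysctl_nr_open_min, sysctl_nr_open_max]. For illustration
 * (not part of this file), an administrator can raise the ceiling from
 * userspace with e.g.:
 *
 *      # sysctl -w fs.nr_open=2097152
 *
 * after which RLIMIT_NOFILE may be raised up to the new value.
 */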

static void __free_fdtable(struct fdtable *fdt)
{
        kvfree(fdt->fd);
        kvfree(fdt->open_fds);
        kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
        __free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)   BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
                            unsigned int count)
{
        unsigned int cpy, set;

        cpy = count / BITS_PER_BYTE;
        set = (nfdt->max_fds - count) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)nfdt->open_fds + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)nfdt->close_on_exec + cpy, 0, set);

        cpy = BITBIT_SIZE(count);
        set = BITBIT_SIZE(nfdt->max_fds) - cpy;
        memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
        memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        size_t cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)nfdt->fd + cpy, 0, set);

        copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        void *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
        /*
         * Note that this can drive nr *below* what we had passed if sysctl_nr_open
         * had been set lower between the check in expand_files() and here. Deal
         * with that in caller, it's cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
         * bitmaps handling below becomes unpleasant, to put it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
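
        /*
         * A worked example of the sizing above (an illustration, not kernel
         * code): on a 64-bit box sizeof(struct file *) == 8, so the chunk
         * unit is 1024 / 8 == 128 slots. A request for nr == 300 becomes
         * 300 / 128 == 2, roundup_pow_of_two(2 + 1) == 4, 4 * 128 == 512
         * slots, i.e. a 4096-byte fd array - exactly one page. Every result
         * is a multiple of 128, and therefore of BITS_PER_LONG.
         */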

        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
        if (!data)
                goto out_fdt;
        fdt->fd = data;

        data = kvmalloc(max_t(size_t,
                              2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
                        GFP_KERNEL_ACCOUNT);
        if (!data)
                goto out_arr;
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
        data += nr / BITS_PER_BYTE;
        fdt->full_fds_bits = data;

        return fdt;

out_arr:
        kvfree(fdt->fd);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);

        /* make sure all fd_install() have seen resize_in_progress
         * or have finished their rcu_read_lock_sched() section.
         */
        if (atomic_read(&files->count) > 1)
                synchronize_rcu();

        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * extremely unlikely race - sysctl_nr_open decreased between the check in
         * caller and alloc_fdtable(). Cheaper to catch it here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
        cur_fdt = files_fdtable(files);
        BUG_ON(nr < cur_fdt->max_fds);
        copy_fdtable(new_fdt, cur_fdt);
        rcu_assign_pointer(files->fdt, new_fdt);
        if (cur_fdt != &files->fdtab)
                call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
        /* coupled with smp_rmb() in fd_install() */
        smp_wmb();
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *fdt;
        int expanded = 0;

repeat:
        fdt = files_fdtable(files);

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return expanded;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        if (unlikely(files->resize_in_progress)) {
                spin_unlock(&files->file_lock);
                expanded = 1;
                wait_event(files->resize_wait, !files->resize_in_progress);
                spin_lock(&files->file_lock);
                goto repeat;
        }

        /* All good, so we try */
        files->resize_in_progress = true;
        expanded = expand_fdtable(files, nr);
        files->resize_in_progress = false;

        wake_up_all(&files->resize_wait);
        return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
        if (test_bit(fd, fdt->close_on_exec))
                __clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->open_fds);
        fd /= BITS_PER_LONG;
        if (!~fdt->open_fds[fd])
                __set_bit(fd, fdt->full_fds_bits);
}
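
/*
 * full_fds_bits is a second-level bitmap with one bit per word of open_fds:
 * bit n is set iff word n of open_fds is all ones. A sketch of the effect
 * (illustrative values, BITS_PER_LONG == 64): once fds 0..63 are all open,
 * __set_open_fd() sets bit 0 of full_fds_bits, and find_next_fd() below can
 * then skip that whole word in one step instead of scanning 64 set bits.
 */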

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __clear_bit(fd, fdt->open_fds);
        __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static unsigned int count_open_files(struct fdtable *fdt)
{
        unsigned int size = fdt->max_fds;
        unsigned int i;

        /* Find the last open fd */
        for (i = size / BITS_PER_LONG; i > 0; ) {
                if (fdt->open_fds[--i])
                        break;
        }
        i = (i + 1) * BITS_PER_LONG;
        return i;
}
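
/*
 * Note that count_open_files() is deliberately coarse: it returns the size,
 * rounded up to a whole bitmap word, of the prefix of the table that covers
 * every open fd. E.g. (illustrative, BITS_PER_LONG == 64) with max_fds == 256
 * and the highest open fd being 70, words 3 and 2 of open_fds are empty,
 * word 1 is not, so the result is (1 + 1) * 64 == 128.
 */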

static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
        unsigned int count;

        count = count_open_files(fdt);
        if (max_fds < NR_OPEN_DEFAULT)
                max_fds = NR_OPEN_DEFAULT;
        return min(count, max_fds);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        unsigned int open_files, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->resize_in_progress = false;
        init_waitqueue_head(&newf->resize_wait);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
        new_fdt->full_fds_bits = newf->full_fds_bits_init;
        new_fdt->fd = &newf->fd_array[0];

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = sane_fdtable_size(old_fdt, max_fds);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab)
                        __free_fdtable(new_fdt);

                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing to do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        __free_fdtable(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }

                /*
                 * Reacquire the oldf lock and a pointer to its fd table; the
                 * table may have been replaced with a new, bigger one in the
                 * meantime, so we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = sane_fdtable_size(old_fdt, max_fds);
        }

        copy_fd_bitmaps(new_fdt, old_fdt, open_files);

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open(). So make sure that this
                         * fd is available to the new process.
                         */
                        __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* clear the remainder */
        memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        struct fdtable *fdt = rcu_dereference_raw(files->fdt);
        unsigned int i, j = 0;

        for (;;) {
                unsigned long set;
                i = j * BITS_PER_LONG;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }

        return fdt;
}

void put_files_struct(struct files_struct *files)
{
        if (atomic_dec_and_test(&files->count)) {
                struct fdtable *fdt = close_files(files);

                /* free the arrays if they are not embedded */
                if (fdt != &files->fdtab)
                        __free_fdtable(fdt);
                kmem_cache_free(files_cachep, files);
        }
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
        .fdtab          = {
                .max_fds        = NR_OPEN_DEFAULT,
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = init_files.close_on_exec_init,
                .open_fds       = init_files.open_fds_init,
                .full_fds_bits  = init_files.full_fds_bits_init,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
        .resize_wait    = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
        unsigned int maxfd = fdt->max_fds;
        unsigned int maxbit = maxfd / BITS_PER_LONG;
        unsigned int bitbit = start / BITS_PER_LONG;

        bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
        if (bitbit > maxfd)
                return maxfd;
        if (bitbit > start)
                start = bitbit;
        return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
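
/*
 * A worked example of the two-level search above (illustrative values,
 * BITS_PER_LONG == 64): with fds 0..199 open and start == 0, the scan of
 * full_fds_bits skips the three full words covering fds 0..191 in one
 * step, lands on word 3, and find_next_zero_bit() on open_fds then
 * returns 200 after inspecting only that word - rather than walking all
 * 200 occupied bits one at a time.
 */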

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
        struct files_struct *files = current->files;
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_fd(fdt, fd);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        error = -EMFILE;
        if (fd >= end)
                goto out;

        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
        return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
        return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
        struct fdtable *fdt = files_fdtable(files);
        __clear_open_fd(fd, fdt);
        if (fd < files->next_fd)
                files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
        struct files_struct *files = current->files;
        spin_lock(&files->file_lock);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
567 * will follow.
568 *
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;

        rcu_read_lock_sched();

        if (unlikely(files->resize_in_progress)) {
                rcu_read_unlock_sched();
                spin_lock(&files->file_lock);
                fdt = files_fdtable(files);
                BUG_ON(fdt->fd[fd] != NULL);
                rcu_assign_pointer(fdt->fd[fd], file);
                spin_unlock(&files->file_lock);
                return;
        }
        /* coupled with smp_wmb() in expand_fdtable() */
        smp_rmb();
        fdt = rcu_dereference_sched(files->fdt);
        BUG_ON(fdt->fd[fd] != NULL);
        rcu_assign_pointer(fdt->fd[fd], file);
        rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
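
/*
 * The usual in-kernel pattern built on the two halves above (a hedged
 * sketch, not code from this file; anon_inode_getfile() is one common way
 * to obtain a struct file, and example_fops/priv are hypothetical):
 *
 *      int fd = get_unused_fd_flags(O_CLOEXEC);
 *      if (fd < 0)
 *              return fd;
 *      file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
 *      if (IS_ERR(file)) {
 *              put_unused_fd(fd);
 *              return PTR_ERR(file);
 *      }
 *      fd_install(fd, file);
 *      return fd;
 *
 * Reserving the fd first and installing the file last means no other
 * thread can observe a half-built descriptor, and fd_install() consumes
 * the file reference on success.
 */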

/**
 * pick_file - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * If this function returns an EINVAL error pointer the fd was beyond the
 * current maximum number of file descriptors for that fdtable.
 *
 * Returns: The file associated with @fd, on error returns an error pointer.
 */
static struct file *pick_file(struct files_struct *files, unsigned fd)
{
        struct file *file;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds) {
                file = ERR_PTR(-EINVAL);
                goto out_unlock;
        }
        file = fdt->fd[fd];
        if (!file) {
                file = ERR_PTR(-EBADF);
                goto out_unlock;
        }
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __put_unused_fd(files, fd);

out_unlock:
        spin_unlock(&files->file_lock);
        return file;
}

int close_fd(unsigned fd)
{
        struct files_struct *files = current->files;
        struct file *file;

        file = pick_file(files, fd);
        if (IS_ERR(file))
                return -EBADF;

        return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: fd table to take the last valid index from
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
        return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
                                   unsigned int fd, unsigned int max_fd)
{
        struct fdtable *fdt;

        /* make sure we're using the correct maximum value */
        spin_lock(&cur_fds->file_lock);
        fdt = files_fdtable(cur_fds);
        max_fd = min(last_fd(fdt), max_fd);
        if (fd <= max_fd)
                bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
        spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
                                 unsigned int max_fd)
{
        while (fd <= max_fd) {
                struct file *file;

                file = pick_file(cur_fds, fd++);
                if (!IS_ERR(file)) {
                        /* found a valid file to close */
                        filp_close(file, cur_fds);
                        cond_resched();
                        continue;
                }

                /* beyond the last fd in that table */
                if (PTR_ERR(file) == -EINVAL)
                        return;
        }
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd: starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags: CLOSE_RANGE flags
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
        struct task_struct *me = current;
        struct files_struct *cur_fds = me->files, *fds = NULL;

        if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
                return -EINVAL;

        if (fd > max_fd)
                return -EINVAL;

        if (flags & CLOSE_RANGE_UNSHARE) {
                int ret;
                unsigned int max_unshare_fds = NR_OPEN_MAX;

                /*
                 * If the caller requested all fds to be made cloexec we always
                 * copy all of the file descriptors since they still want to
                 * use them.
                 */
                if (!(flags & CLOSE_RANGE_CLOEXEC)) {
                        /*
                         * If the requested range is greater than the current
                         * maximum, we're closing everything, so only copy the
                         * file descriptors beneath the lowest fd to be closed.
                         */
                        rcu_read_lock();
                        if (max_fd >= last_fd(files_fdtable(cur_fds)))
                                max_unshare_fds = fd;
                        rcu_read_unlock();
                }

                ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
                if (ret)
                        return ret;

                /*
                 * We used to share our file descriptor table, and have now
                 * created a private one, make sure we're using it below.
                 */
                if (fds)
                        swap(cur_fds, fds);
        }

        if (flags & CLOSE_RANGE_CLOEXEC)
                __range_cloexec(cur_fds, fd, max_fd);
        else
                __range_close(cur_fds, fd, max_fd);

        if (fds) {
                /*
                 * We're done closing the files we were supposed to. Time to install
                 * the new file descriptor table and drop the old one.
                 */
                task_lock(me);
                me->files = cur_fds;
                task_unlock(me);
                put_files_struct(fds);
        }

        return 0;
}
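
/*
 * From userspace this is reached via the close_range(2) syscall. A hedged
 * usage sketch (glibc >= 2.34 exposes a wrapper): a daemon that wants to
 * drop every inherited descriptor above stderr before exec can do
 *
 *      close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 *
 * or mark them close-on-exec instead of closing them eagerly with
 *
 *      close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);
 */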

/*
 * See close_fd_get_file() below, this variant assumes current->files->file_lock
 * is held.
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
        struct files_struct *files = current->files;
        struct file *file;
        struct fdtable *fdt;

        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
                goto out_err;
        file = fdt->fd[fd];
        if (!file)
                goto out_err;
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __put_unused_fd(files, fd);
        get_file(file);
        *res = file;
        return 0;
out_err:
        *res = NULL;
        return -ENOENT;
}

/*
 * variant of close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file,
 * followed by an fput().
 */
int close_fd_get_file(unsigned int fd, struct file **res)
{
        struct files_struct *files = current->files;
        int ret;

        spin_lock(&files->file_lock);
        ret = __close_fd_get_file(fd, res);
        spin_unlock(&files->file_lock);

        return ret;
}

void do_close_on_exec(struct files_struct *files)
{
        unsigned i;
        struct fdtable *fdt;

        /* exec unshares first */
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
                unsigned long set;
                unsigned fd = i * BITS_PER_LONG;
                fdt = files_fdtable(files);
                if (fd >= fdt->max_fds)
                        break;
                set = fdt->close_on_exec[i];
                if (!set)
                        continue;
                fdt->close_on_exec[i] = 0;
                for ( ; set ; fd++, set >>= 1) {
                        struct file *file;
                        if (!(set & 1))
                                continue;
                        file = fdt->fd[fd];
                        if (!file)
                                continue;
                        rcu_assign_pointer(fdt->fd[fd], NULL);
                        __put_unused_fd(files, fd);
                        spin_unlock(&files->file_lock);
                        filp_close(file, files);
                        cond_resched();
                        spin_lock(&files->file_lock);
                }

        }
        spin_unlock(&files->file_lock);
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
                                 fmode_t mask, unsigned int refs)
{
        struct file *file;

        rcu_read_lock();
loop:
        file = files_lookup_fd_rcu(files, fd);
        if (file) {
                /* File object ref couldn't be taken.
                 * dup2() atomicity guarantee is the reason
                 * we loop to catch the new file (or NULL pointer)
                 */
                if (file->f_mode & mask)
                        file = NULL;
                else if (!get_file_rcu_many(file, refs))
                        goto loop;
        }
        rcu_read_unlock();

        return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
                                  unsigned int refs)
{
        return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
        return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
        return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
        return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
        struct file *file = NULL;

        task_lock(task);
        if (task->files)
                file = __fget_files(task->files, fd, 0, 1);
        task_unlock(task);

        return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
        /* Must be called with rcu_read_lock held */
        struct files_struct *files;
        struct file *file = NULL;

        task_lock(task);
        files = task->files;
        if (files)
                file = files_lookup_fd_rcu(files, fd);
        task_unlock(task);

        return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
        /* Must be called with rcu_read_lock held */
        struct files_struct *files;
        unsigned int fd = *ret_fd;
        struct file *file = NULL;

        task_lock(task);
        files = task->files;
        if (files) {
                for (; fd < files_fdtable(files)->max_fds; fd++) {
                        file = files_lookup_fd_rcu(files, fd);
                        if (file)
                                break;
                }
        }
        task_unlock(task);
        *ret_fd = fd;
        return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
        struct files_struct *files = current->files;
        struct file *file;

        if (atomic_read(&files->count) == 1) {
                file = files_lookup_fd_raw(files, fd);
                if (!file || unlikely(file->f_mode & mask))
                        return 0;
                return (unsigned long)file;
        } else {
                file = __fget(fd, mask, 1);
                if (!file)
                        return 0;
                return FDPUT_FPUT | (unsigned long)file;
        }
}
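
/*
 * The unsigned long returned above packs a struct file pointer together
 * with flag bits in the two low bits (valid because struct file is at
 * least word-aligned). A sketch of how fdget()/fdput() in <linux/file.h>
 * unpack it (paraphrased; see that header for the real definitions):
 *
 *      struct fd f = { (struct file *)(v & ~3), v & 3 };
 *      ...
 *      if (f.flags & FDPUT_FPUT)
 *              fput(f.file);
 */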
unsigned long __fdget(unsigned int fd)
{
        return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
        return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
        unsigned long v = __fdget(fd);
        struct file *file = (struct file *)(v & ~3);

        if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
                if (file_count(file) > 1) {
                        v |= FDPUT_POS_UNLOCK;
                        mutex_lock(&file->f_pos_lock);
                }
        }
        return v;
}

void __f_unlock_pos(struct file *f)
{
        mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        bool res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = close_on_exec(fd, fdt);
        rcu_read_unlock();
        return res;
}

static int do_dup2(struct files_struct *files,
        struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
        struct file *tofree;
        struct fdtable *fdt;

        /*
         * We need to detect attempts to do dup2() over allocated but still
         * not finished descriptor. NB: OpenBSD avoids that at the price of
         * extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent. Potentially interesting,
         * but while extra work in fget() is trivial, locking implications
         * and amount of surgery on open()-related paths in VFS are not.
         * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
         * deadlocks in rather amusing ways, AFAICS. All of that is out of
         * scope of POSIX or SUS, since neither considers shared descriptor
         * tables and this condition does not arise without those.
         */
        fdt = files_fdtable(files);
        tofree = fdt->fd[fd];
        if (!tofree && fd_is_open(fd, fdt))
                goto Ebusy;
        get_file(file);
        rcu_assign_pointer(fdt->fd[fd], file);
        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return fd;

Ebusy:
        spin_unlock(&files->file_lock);
        return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
        int err;
        struct files_struct *files = current->files;

        if (!file)
                return close_fd(fd);

        if (fd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, fd);
        if (unlikely(err < 0))
                goto out_unlock;
        return do_dup2(files, file, fd, flags);

out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
        int new_fd;
        int error;

        error = security_file_receive(file);
        if (error)
                return error;

        new_fd = get_unused_fd_flags(o_flags);
        if (new_fd < 0)
                return new_fd;

        if (ufd) {
                error = put_user(new_fd, ufd);
                if (error) {
                        put_unused_fd(new_fd);
                        return error;
                }
        }

        fd_install(new_fd, get_file(file));
        __receive_sock(file);
        return new_fd;
}

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
        int error;

        error = security_file_receive(file);
        if (error)
                return error;
        error = replace_fd(new_fd, file, o_flags);
        if (error)
                return error;
        __receive_sock(file);
        return new_fd;
}

int receive_fd(struct file *file, unsigned int o_flags)
{
        return __receive_fd(file, NULL, o_flags);
}
EXPORT_SYMBOL_GPL(receive_fd);

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
        int err = -EBADF;
        struct file *file;
        struct files_struct *files = current->files;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        if (newfd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = files_lookup_fd_locked(files, oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        return do_dup2(files, file, newfd, flags);

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!files_lookup_fd_rcu(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget_raw(fildes);

        if (file) {
                ret = get_unused_fd_flags(0);
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
        unsigned long nofile = rlimit(RLIMIT_NOFILE);
        int err;
        if (from >= nofile)
                return -EINVAL;
        err = alloc_fd(from, nofile, flags);
        if (err >= 0) {
                get_file(file);
                fd_install(err, file);
        }
        return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
                int (*f)(const void *, struct file *, unsigned),
                const void *p)
{
        struct fdtable *fdt;
        int res = 0;
        if (!files)
                return 0;
        spin_lock(&files->file_lock);
        for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
                struct file *file;
                file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
                if (!file)
                        continue;
                res = f(p, file, n);
                if (res)
                        break;
        }
        spin_unlock(&files->file_lock);
        return res;
}
EXPORT_SYMBOL(iterate_fd);