// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>
#include <linux/io_uring.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
        __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
        kvfree(fdt->fd);
        kvfree(fdt->open_fds);
        kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
        __free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
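
/*
 * Editorial note (not in the original source): full_fds_bits is a
 * second-level bitmap holding one bit per word of open_fds. For example,
 * with BITS_PER_LONG == 64, a table of 4096 fds has 64 open_fds words,
 * so BITBIT_NR(4096) == BITS_TO_LONGS(64) == 1 long (8 bytes) suffices
 * to record which open_fds words are completely full.
 */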

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
                            unsigned int count)
{
        unsigned int cpy, set;

        cpy = count / BITS_PER_BYTE;
        set = (nfdt->max_fds - count) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)nfdt->open_fds + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)nfdt->close_on_exec + cpy, 0, set);

        cpy = BITBIT_SIZE(count);
        set = BITBIT_SIZE(nfdt->max_fds) - cpy;
        memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
        memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        size_t cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)nfdt->fd + cpy, 0, set);

        copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        void *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
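        /*
         * Worked example (editorial, not in the original source): on a
         * 64-bit build sizeof(struct file *) == 8, so the chunk size is
         * 1024 / 8 == 128 slots. A request for nr == 300 gives
         * 300 / 128 == 2, roundup_pow_of_two(3) == 4, and 4 * 128 == 512
         * slots, i.e. a 4096-byte fd array that fills a page exactly.
         */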
        /*
         * Note that this can drive nr *below* what we had passed if sysctl_nr_open
         * had been set lower between the check in expand_files() and here. Deal
         * with that in caller, it's cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
         * bitmaps handling below becomes unpleasant, to put it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
        if (!data)
                goto out_fdt;
        fdt->fd = data;

        data = kvmalloc(max_t(size_t,
                              2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
                        GFP_KERNEL_ACCOUNT);
        if (!data)
                goto out_arr;
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
        data += nr / BITS_PER_BYTE;
        fdt->full_fds_bits = data;

        return fdt;

out_arr:
        kvfree(fdt->fd);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);

        /* make sure all concurrent __fd_install() calls have seen
         * resize_in_progress or have finished their
         * rcu_read_lock_sched() section.
         */
        if (atomic_read(&files->count) > 1)
                synchronize_rcu();

        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * extremely unlikely race - sysctl_nr_open decreased between the check in
         * caller and alloc_fdtable(). Cheaper to catch it here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
        cur_fdt = files_fdtable(files);
        BUG_ON(nr < cur_fdt->max_fds);
        copy_fdtable(new_fdt, cur_fdt);
        rcu_assign_pointer(files->fdt, new_fdt);
        if (cur_fdt != &files->fdtab)
                call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
        /* coupled with smp_rmb() in __fd_install() */
        smp_wmb();
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *fdt;
        int expanded = 0;

repeat:
        fdt = files_fdtable(files);

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return expanded;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        if (unlikely(files->resize_in_progress)) {
                spin_unlock(&files->file_lock);
                expanded = 1;
                wait_event(files->resize_wait, !files->resize_in_progress);
                spin_lock(&files->file_lock);
                goto repeat;
        }

        /* All good, so we try */
        files->resize_in_progress = true;
        expanded = expand_fdtable(files, nr);
        files->resize_in_progress = false;

        wake_up_all(&files->resize_wait);
        return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
        if (test_bit(fd, fdt->close_on_exec))
                __clear_bit(fd, fdt->close_on_exec);
}

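/*
 * Editorial note (not in the original source): !~fdt->open_fds[fd] is
 * true only when every bit in that open_fds word is set, i.e. all
 * BITS_PER_LONG descriptors it covers are in use; the word's summary
 * bit in full_fds_bits is then set so find_next_fd() can skip it.
 */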
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->open_fds);
        fd /= BITS_PER_LONG;
        if (!~fdt->open_fds[fd])
                __set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __clear_bit(fd, fdt->open_fds);
        __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

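/*
 * Editorial note (not in the original source): the result below is always
 * rounded up to a multiple of BITS_PER_LONG. For example, if the highest
 * open fd is 70 on a 64-bit build, word 1 is the last non-zero open_fds
 * word, so count_open_files() returns 2 * 64 == 128.
 */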
static unsigned int count_open_files(struct fdtable *fdt)
{
        unsigned int size = fdt->max_fds;
        unsigned int i;

        /* Find the last open fd */
        for (i = size / BITS_PER_LONG; i > 0; ) {
                if (fdt->open_fds[--i])
                        break;
        }
        i = (i + 1) * BITS_PER_LONG;
        return i;
}

static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
        unsigned int count;

        count = count_open_files(fdt);
        if (max_fds < NR_OPEN_DEFAULT)
                max_fds = NR_OPEN_DEFAULT;
        return min(count, max_fds);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        unsigned int open_files, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->resize_in_progress = false;
        init_waitqueue_head(&newf->resize_wait);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
        new_fdt->full_fds_bits = newf->full_fds_bits_init;
        new_fdt->fd = &newf->fd_array[0];

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = sane_fdtable_size(old_fdt, max_fds);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab)
                        __free_fdtable(new_fdt);

                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing to do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        __free_fdtable(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }

                /*
                 * Reacquire the oldf lock and a pointer to its fd table; it
                 * may have grown a new, bigger fd table while we dropped the
                 * lock, so we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = sane_fdtable_size(old_fdt, max_fds);
        }

        copy_fd_bitmaps(new_fdt, old_fdt, open_files);

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open(). So make sure that this
                         * fd is available to the new process.
                         */
                        __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* clear the remainder */
        memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        struct fdtable *fdt = rcu_dereference_raw(files->fdt);
        unsigned int i, j = 0;

        for (;;) {
                unsigned long set;
                i = j * BITS_PER_LONG;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }

        return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

void put_files_struct(struct files_struct *files)
{
        if (atomic_dec_and_test(&files->count)) {
                struct fdtable *fdt = close_files(files);

                /* free the arrays if they are not embedded */
                if (fdt != &files->fdtab)
                        __free_fdtable(fdt);
                kmem_cache_free(files_cachep, files);
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                io_uring_files_cancel(files);
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

struct files_struct init_files = {
        .count		= ATOMIC_INIT(1),
        .fdt		= &init_files.fdtab,
        .fdtab		= {
                .max_fds	= NR_OPEN_DEFAULT,
                .fd		= &init_files.fd_array[0],
                .close_on_exec	= init_files.close_on_exec_init,
                .open_fds	= init_files.open_fds_init,
                .full_fds_bits	= init_files.full_fds_bits_init,
        },
        .file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
        .resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

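/*
 * Editorial note (not in the original source): this is a two-level
 * search. The first find_next_zero_bit() scans full_fds_bits to find
 * the first open_fds word, at or after 'start', that is not completely
 * full; the second scans open_fds itself from there. E.g. with a
 * 256-slot table, fds 0..127 all open and start == 3 on a 64-bit
 * build, the first scan skips words 0 and 1 and the second returns 128.
 */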
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
        unsigned int maxfd = fdt->max_fds;
        unsigned int maxbit = maxfd / BITS_PER_LONG;
        unsigned int bitbit = start / BITS_PER_LONG;

        bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
        if (bitbit > maxfd)
                return maxfd;
        if (bitbit > start)
                start = bitbit;
        return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
               unsigned start, unsigned end, unsigned flags)
{
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_fd(fdt, fd);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        error = -EMFILE;
        if (fd >= end)
                goto out;

        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
        return __alloc_fd(current->files, 0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
        return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
        struct fdtable *fdt = files_fdtable(files);
        __clear_open_fd(fd, fdt);
        if (fd < files->next_fd)
                files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
        struct files_struct *files = current->files;
        spin_lock(&files->file_lock);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * This should never happen - if we allowed dup2() to do it, _really_ bad
 * things would follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat. 'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen. Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
                struct file *file)
{
        struct fdtable *fdt;

        rcu_read_lock_sched();

        if (unlikely(files->resize_in_progress)) {
                rcu_read_unlock_sched();
                spin_lock(&files->file_lock);
                fdt = files_fdtable(files);
                BUG_ON(fdt->fd[fd] != NULL);
                rcu_assign_pointer(fdt->fd[fd], file);
                spin_unlock(&files->file_lock);
                return;
        }
        /* coupled with smp_wmb() in expand_fdtable() */
        smp_rmb();
        fdt = rcu_dereference_sched(files->fdt);
        BUG_ON(fdt->fd[fd] != NULL);
        rcu_assign_pointer(fdt->fd[fd], file);
        rcu_read_unlock_sched();
}

/*
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
        __fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
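
/*
 * Editorial usage sketch (not in the original source), modeled on the
 * common open() pattern elsewhere in the kernel: allocate a slot first,
 * then publish the file into it, or release the slot on failure.
 * my_create_file() is a hypothetical helper, not a real kernel API.
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = my_create_file();	// hypothetical helper
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// slot was never published
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// consumes the file reference
 *	return fd;
 */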

static struct file *pick_file(struct files_struct *files, unsigned fd)
{
        struct file *file = NULL;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
                goto out_unlock;
        file = fdt->fd[fd];
        if (!file)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __put_unused_fd(files, fd);

out_unlock:
        spin_unlock(&files->file_lock);
        return file;
}

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
        struct file *file;

        file = pick_file(files, fd);
        if (!file)
                return -EBADF;

        return filp_close(file, files);
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd: starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags: CLOSE_RANGE_* flags
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
        unsigned int cur_max;
        struct task_struct *me = current;
        struct files_struct *cur_fds = me->files, *fds = NULL;

        if (flags & ~CLOSE_RANGE_UNSHARE)
                return -EINVAL;

        if (fd > max_fd)
                return -EINVAL;

        rcu_read_lock();
        cur_max = files_fdtable(cur_fds)->max_fds;
        rcu_read_unlock();

        /* cap to last valid index into fdtable */
        cur_max--;

        if (flags & CLOSE_RANGE_UNSHARE) {
                int ret;
                unsigned int max_unshare_fds = NR_OPEN_MAX;

                /*
                 * If the requested range extends past the current maximum,
                 * we're closing everything, so only copy the file descriptors
                 * beneath the lowest file descriptor to be closed.
                 */
                if (max_fd >= cur_max)
                        max_unshare_fds = fd;

                ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
                if (ret)
                        return ret;

                /*
                 * We used to share our file descriptor table, and have now
                 * created a private one, make sure we're using it below.
                 */
                if (fds)
                        swap(cur_fds, fds);
        }

        max_fd = min(max_fd, cur_max);
        while (fd <= max_fd) {
                struct file *file;

                file = pick_file(cur_fds, fd++);
                if (!file)
                        continue;

                filp_close(file, cur_fds);
                cond_resched();
        }

        if (fds) {
                /*
                 * We're done closing the files we were supposed to. Time to
                 * install the new file descriptor table and drop the old one.
                 */
                task_lock(me);
                me->files = cur_fds;
                task_unlock(me);
                put_files_struct(fds);
        }

        return 0;
}

/*
 * Variant of __close_fd that gets a ref on the file for later fput.
 * The caller must ensure that filp_close() is called on the file,
 * followed by an fput().
 */
int __close_fd_get_file(unsigned int fd, struct file **res)
{
        struct files_struct *files = current->files;
        struct file *file;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
                goto out_unlock;
        file = fdt->fd[fd];
        if (!file)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
        get_file(file);
        *res = file;
        return 0;

out_unlock:
        spin_unlock(&files->file_lock);
        *res = NULL;
        return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
        unsigned i;
        struct fdtable *fdt;

        /* exec unshares first */
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
                unsigned long set;
                unsigned fd = i * BITS_PER_LONG;
                fdt = files_fdtable(files);
                if (fd >= fdt->max_fds)
                        break;
                set = fdt->close_on_exec[i];
                if (!set)
                        continue;
                fdt->close_on_exec[i] = 0;
                for ( ; set ; fd++, set >>= 1) {
                        struct file *file;
                        if (!(set & 1))
                                continue;
                        file = fdt->fd[fd];
                        if (!file)
                                continue;
                        rcu_assign_pointer(fdt->fd[fd], NULL);
                        __put_unused_fd(files, fd);
                        spin_unlock(&files->file_lock);
                        filp_close(file, files);
                        cond_resched();
                        spin_lock(&files->file_lock);
                }

        }
        spin_unlock(&files->file_lock);
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
                                 fmode_t mask, unsigned int refs)
{
        struct file *file;

        rcu_read_lock();
loop:
        file = fcheck_files(files, fd);
        if (file) {
                /* The file object ref couldn't be taken: the dup2()
                 * atomicity guarantee is the reason we loop to catch
                 * the new file (or NULL pointer).
                 */
                if (file->f_mode & mask)
                        file = NULL;
                else if (!get_file_rcu_many(file, refs))
                        goto loop;
        }
        rcu_read_unlock();

        return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
                                  unsigned int refs)
{
        return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
        return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
        return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
        return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
        struct file *file = NULL;

        task_lock(task);
        if (task->files)
                file = __fget_files(task->files, fd, 0, 1);
        task_unlock(task);

        return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
        struct files_struct *files = current->files;
        struct file *file;

        if (atomic_read(&files->count) == 1) {
                file = __fcheck_files(files, fd);
                if (!file || unlikely(file->f_mode & mask))
                        return 0;
                return (unsigned long)file;
        } else {
                file = __fget(fd, mask, 1);
                if (!file)
                        return 0;
                return FDPUT_FPUT | (unsigned long)file;
        }
}
unsigned long __fdget(unsigned int fd)
{
        return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);
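
/*
 * Editorial usage sketch (not in the original source): most callers go
 * through the fdget()/fdput() wrappers from <linux/file.h>, which pack
 * the "needs fput" flag returned here into struct fd. vfs_do_something()
 * is a hypothetical operation, not a real kernel API.
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = vfs_do_something(f.file);	// hypothetical operation
 *	fdput(f);			// drops the ref only if one was taken
 *	return ret;
 */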

unsigned long __fdget_raw(unsigned int fd)
{
        return __fget_light(fd, 0);
}

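/*
 * Editorial note (not in the original source): struct file pointers are
 * at least 4-byte aligned, so the two low bits of the value returned by
 * __fget_light() are free to carry the FDPUT_FPUT and FDPUT_POS_UNLOCK
 * flags; the "& ~3" below strips them to recover the pointer.
 */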
unsigned long __fdget_pos(unsigned int fd)
{
        unsigned long v = __fdget(fd);
        struct file *file = (struct file *)(v & ~3);

        if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
                if (file_count(file) > 1) {
                        v |= FDPUT_POS_UNLOCK;
                        mutex_lock(&file->f_pos_lock);
                }
        }
        return v;
}

void __f_unlock_pos(struct file *f)
{
        mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        bool res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = close_on_exec(fd, fdt);
        rcu_read_unlock();
        return res;
}

static int do_dup2(struct files_struct *files,
        struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
        struct file *tofree;
        struct fdtable *fdt;

        /*
         * We need to detect attempts to do dup2() over an allocated but
         * still not finished descriptor. NB: OpenBSD avoids that at the price of
         * extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent. Potentially interesting,
         * but while extra work in fget() is trivial, locking implications
         * and amount of surgery on open()-related paths in VFS are not.
         * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
         * deadlocks in rather amusing ways, AFAICS. All of that is out of
         * scope of POSIX or SUS, since neither considers shared descriptor
         * tables and this condition does not arise without those.
         */
        fdt = files_fdtable(files);
        tofree = fdt->fd[fd];
        if (!tofree && fd_is_open(fd, fdt))
                goto Ebusy;
        get_file(file);
        rcu_assign_pointer(fdt->fd[fd], file);
        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return fd;

Ebusy:
        spin_unlock(&files->file_lock);
        return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
        int err;
        struct files_struct *files = current->files;

        if (!file)
                return __close_fd(files, fd);

        if (fd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, fd);
        if (unlikely(err < 0))
                goto out_unlock;
        return do_dup2(files, file, fd, flags);

out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

/**
 * __receive_fd() - Install received file into file descriptor table
 *
 * @fd: fd to install into (if negative, a new fd will be allocated)
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative error code on failure.
 */
int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags)
{
        int new_fd;
        int error;

        error = security_file_receive(file);
        if (error)
                return error;

        if (fd < 0) {
                new_fd = get_unused_fd_flags(o_flags);
                if (new_fd < 0)
                        return new_fd;
        } else {
                new_fd = fd;
        }

        if (ufd) {
                error = put_user(new_fd, ufd);
                if (error) {
                        if (fd < 0)
                                put_unused_fd(new_fd);
                        return error;
                }
        }

        if (fd < 0) {
                fd_install(new_fd, get_file(file));
        } else {
                error = replace_fd(new_fd, file, o_flags);
                if (error)
                        return error;
        }

        /* Bump the sock usage counts, if any. */
        __receive_sock(file);
        return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
        int err = -EBADF;
        struct file *file;
        struct files_struct *files = current->files;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        if (newfd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = fcheck(oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        return do_dup2(files, file, newfd, flags);

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!fcheck_files(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget_raw(fildes);

        if (file) {
                ret = get_unused_fd_flags(0);
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
        int err;
        if (from >= rlimit(RLIMIT_NOFILE))
                return -EINVAL;
        err = alloc_fd(from, flags);
        if (err >= 0) {
                get_file(file);
                fd_install(err, file);
        }
        return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
                int (*f)(const void *, struct file *, unsigned),
                const void *p)
{
        struct fdtable *fdt;
        int res = 0;
        if (!files)
                return 0;
        spin_lock(&files->file_lock);
        for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
                struct file *file;
                file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
                if (!file)
                        continue;
                res = f(p, file, n);
                if (res)
                        break;
        }
        spin_unlock(&files->file_lock);
        return res;
}
EXPORT_SYMBOL(iterate_fd);