// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
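	/*
	 * Worked example of the sizing above (an illustration, not part of
	 * the original file; assumes 8-byte pointers, so 128 fds per 1024B
	 * chunk): a request for nr == 300 gives 300/128 == 2,
	 * roundup_pow_of_two(2 + 1) == 4, and 4*128 == 512 supported fds,
	 * i.e. a 4096-byte fd array.
	 */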
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* Make sure that all concurrent fd_install() calls have either seen
	 * resize_in_progress or have finished their rcu_read_lock_sched()
	 * section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
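
/*
 * Illustrative note (not part of the original file): full_fds_bits is a
 * second-level bitmap over open_fds.  With BITS_PER_LONG == 64, bit N of
 * full_fds_bits is set exactly when open_fds[N] == ~0UL, i.e. when fds
 * 64*N .. 64*N+63 are all in use, which lets find_next_fd() below skip a
 * whole word of open_fds at a time.
 */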

static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
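
/*
 * Worked example (an illustration, not part of the original file): with
 * max_fds == 256 and the highest open fd == 70, the backwards scan stops
 * at word 1 of open_fds (fds 64..127), so count_open_files() returns
 * (1 + 1) * 64 == 128 - a word-aligned upper bound, not an exact count.
 */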

static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return min(count, max_fds);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table: the
		 * table may have been replaced with a bigger one in the
		 * meantime, so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we might have blocked -
	 * try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
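
/*
 * Illustrative usage sketch (not part of the original file): the common
 * producer pattern reserves a descriptor, creates the struct file, and
 * only then publishes it with fd_install().  anon_inode_getfile() is just
 * one typical producer; example_fops and priv are hypothetical.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("example", &example_fops, priv, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// nothing was installed, give fd back
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// publish; consumes the file reference
 *	return fd;
 */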

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

static struct file *pick_file(struct files_struct *files, unsigned fd)
{
	struct file *file = NULL;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);

out_unlock:
	spin_unlock(&files->file_lock);
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	file = pick_file(files, fd);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	if (fd > max_fd)
		return;

	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
				 unsigned int max_fd)
{
	while (fd <= max_fd) {
		struct file *file;

		file = pick_file(cur_fds, fd++);
		if (!file)
			continue;

		filp_close(file, cur_fds);
		cond_resched();
	}
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd: starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags: CLOSE_RANGE_* flags modifying the behaviour
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	unsigned int cur_max;
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	rcu_read_lock();
	cur_max = files_fdtable(cur_fds)->max_fds;
	rcu_read_unlock();

	/* cap to last valid index into fdtable */
	cur_max--;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the requested range is greater than the current maximum,
		 * we're closing everything so only copy all file descriptors
		 * beneath the lowest file descriptor.
		 * If the caller requested all fds to be made cloexec copy all
		 * of the file descriptors since they still want to use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC) && (max_fd >= cur_max))
			max_unshare_fds = fd;

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	max_fd = min(max_fd, cur_max);

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
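
/*
 * Usage sketch (an illustration, not part of the original file): this is
 * reached from userspace through the close_range(2) syscall, e.g. to close
 * every fd above stderr before exec without touching a table that might be
 * shared with other threads:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 */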

/*
 * Variant of close_fd() that gets a reference on the file for later fput.
 * The caller must ensure that filp_close() is called on the file, followed
 * by an fput().
 */
int close_fd_get_file(unsigned int fd, struct file **res)
{
	struct files_struct *files = current->files;
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	get_file(file);
	*res = file;
	return 0;

out_unlock:
	spin_unlock(&files->file_lock);
	*res = NULL;
	return -ENOENT;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask, unsigned int refs)
{
	struct file *file;

	rcu_read_lock();
loop:
	file = files_lookup_fd_rcu(files, fd);
	if (file) {
		/* If the file object's refcount couldn't be taken, we loop:
		 * the dup2() atomicity guarantee means the slot may now hold
		 * a new file (or a NULL pointer), and we must catch it.
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask,
				  unsigned int refs)
{
	return __fget_files(current->files, fd, mask, refs);
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
	return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0, 1);
	task_unlock(task);

	return file;
}

struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = files_lookup_fd_rcu(files, fd);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = files_lookup_fd_rcu(files, fd);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask, 1);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
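
/*
 * Illustrative note (not part of the original file): callers normally go
 * through the fdget()/fdput() helpers in <linux/file.h>, which unpack the
 * FDPUT_* flags packed into the low bits of the returned unsigned long:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	// ... use f.file ...
 *	fdput(f);	// fputs only if FDPUT_FPUT was set
 */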

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
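
/*
 * Consumer sketch (an illustration, not part of the original file; this
 * mirrors the pattern read/write-style syscalls use to take the position
 * lock via the <linux/file.h> helpers):
 *
 *	struct fd f = fdget_pos(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	// ... read or write using the file position ...
 *	fdput_pos(f);	// drops f_pos_lock if FDPUT_POS_UNLOCK was set
 */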

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
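
/*
 * Usage sketch (an illustration, not part of the original file): a kernel
 * caller that wants fd 0 of the current task pointed at an existing file,
 * in the style of the usermode-helper setup code (pipe_file here is a
 * hypothetical, already-opened struct file *):
 *
 *	err = replace_fd(0, pipe_file, 0);
 *	if (err < 0)
 *		return err;
 */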

/**
 * __receive_fd() - Install received file into file descriptor table
 *
 * @fd: fd to install into (if negative, a new fd will be allocated)
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative value on error.
 */
int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	if (fd < 0) {
		new_fd = get_unused_fd_flags(o_flags);
		if (new_fd < 0)
			return new_fd;
	} else {
		new_fd = fd;
	}

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			if (fd < 0)
				put_unused_fd(new_fd);
			return error;
		}
	}

	if (fd < 0) {
		fd_install(new_fd, get_file(file));
	} else {
		error = replace_fd(new_fd, file, o_flags);
		if (error)
			return error;
	}

	/* Bump the sock usage counts, if any. */
	__receive_sock(file);
	return new_fd;
}
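
/*
 * Illustrative note (not part of the original file, and assuming the
 * receive_fd*() wrappers declared in <linux/file.h> at this kernel
 * version): callers are expected to use those wrappers rather than call
 * __receive_fd() directly, e.g. the "allocate a fresh fd" case:
 *
 *	int new_fd = receive_fd(file, O_CLOEXEC);
 *	if (new_fd < 0)
 *		return new_fd;
 */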

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!files_lookup_fd_rcu(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
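
/*
 * Usage sketch (an illustration, not part of the original file): f_dupfd()
 * is the backend for fcntl(2)'s F_DUPFD/F_DUPFD_CLOEXEC, so from userspace
 * the "lowest free fd >= 10" behaviour looks like:
 *
 *	int new_fd = fcntl(old_fd, F_DUPFD_CLOEXEC, 10);
 */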

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
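
/*
 * Callback sketch (an illustration, not part of the original file): the
 * callback runs under ->file_lock for each installed file until it returns
 * non-zero, so a "find the fd holding a given file" helper could look like
 * this (match_file is hypothetical):
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;	// +1 so fd 0 reads as a hit
 *	}
 *
 *	int fd_plus_one = iterate_fd(files, 0, match_file, file);
 */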