/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
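
/*
 * RCU callbacks run in softirq context, where vfree() must not be
 * called; vmalloc'ed tables are therefore handed off to the workqueue
 * and freed from process context instead.
 */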

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
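
/*
 * Example: with a typical 4 KiB PAGE_SIZE and PAGE_ALLOC_COSTLY_ORDER
 * of 3, the kmalloc() path covers allocations up to 32 KiB - e.g. a
 * 4096-slot array of 8-byte file pointers; anything larger goes
 * straight to vmalloc().
 */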

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(fdt->max_fds <= NR_OPEN_DEFAULT);

	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
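
/*
 * Example, for a 64-bit build growing a table from 256 to 512 slots:
 * the fd array copy is 256 * 8 = 2048 bytes with another 2048 zeroed,
 * and each bitmap copies 256 / 8 = 32 bytes with another 32 zeroed.
 */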

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	/* one contiguous chunk holds both bitmaps, open_fds first */
	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
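
/*
 * Worked example for a 64-bit build (sizeof(struct file *) == 8, so one
 * 1024-byte step holds 128 slots): nr = 255 becomes 255/128 = 1, then
 * roundup_pow_of_two(2) = 2, then 2 * 128 = 256 slots; nr = 256 becomes
 * 2, then 4, then 512 slots.  Each step keeps the fd array in
 * power-of-two, page-friendly sizes.
 */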

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
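
/*
 * Caller pattern (a sketch): a return of 1 means file_lock was dropped
 * and reacquired inside expand_fdtable(), so anything computed from the
 * old table must be recomputed and the allocation retried:
 *
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;
 *	if (error)
 *		goto repeat;
 */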

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
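
/*
 * Example: on a 64-bit build with fd 70 as the highest open descriptor,
 * the scan stops at bitmap word 1 and the function returns
 * (1 + 1) * 64 = 128 - the count is rounded up to a whole bitmap word,
 * never the exact number of open files.
 */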

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		/* alloc_fdtable() rounds up, so this yields >= open_files slots */
		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	/* open_files is a multiple of BITS_PER_LONG, so /8 is an exact byte count */
	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/* not really needed, since nobody can see us */
		rcu_read_lock();
		fdt = files_fdtable(files);
		rcu_read_unlock();
		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;

	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(fd_install);
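
/*
 * Typical caller pattern (a sketch; some_file_open() stands in for any
 * function that produces a struct file): reserve a slot, build the
 * file, then publish it.  On failure the reserved slot must be given
 * back with put_unused_fd().
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_open();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */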