/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
        struct fdtable *next;
};

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in fdtable itself, which avoids a 64 byte (i386) increase
 * in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

/*
 * Allocate an fd array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
struct file ** alloc_fd_array(int num)
{
        struct file **new_fds;
        int size = num * sizeof(struct file *);

        if (size <= PAGE_SIZE)
                new_fds = (struct file **) kmalloc(size, GFP_KERNEL);
        else
                new_fds = (struct file **) vmalloc(size);
        return new_fds;
}

void free_fd_array(struct file **array, int num)
{
        int size = num * sizeof(struct file *);

        if (!array) {
                printk(KERN_ERR "free_fd_array: array = 0 (num = %d)\n", num);
                return;
        }

        if (num <= NR_OPEN_DEFAULT) /* Don't free the embedded fd array! */
                return;
        else if (size <= PAGE_SIZE)
                kfree(array);
        else
                vfree(array);
}

static void __free_fdtable(struct fdtable *fdt)
{
        free_fdset(fdt->open_fds, fdt->max_fds);
        free_fdset(fdt->close_on_exec, fdt->max_fds);
        free_fd_array(fdt->fd, fdt->max_fds);
        kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
        struct fdtable_defer *f =
                container_of(work, struct fdtable_defer, wq);
        struct fdtable *fdt;

        /* Detach the deferred list under the lock, then free it at leisure. */
        spin_lock_bh(&f->lock);
        fdt = f->next;
        f->next = NULL;
        spin_unlock_bh(&f->lock);
        while (fdt) {
                struct fdtable *next = fdt->next;

                __free_fdtable(fdt);
                fdt = next;
        }
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        int fdset_size, fdarray_size;
        struct fdtable_defer *fddef;

        fdset_size = fdt->max_fds / 8;
        fdarray_size = fdt->max_fds * sizeof(struct file *);

        if (fdt->free_files) {
                /*
                 * This fdtable was embedded in the files structure
                 * and the files structure itself was getting destroyed.
                 * It is now safe to free the files structure.
                 */
                kmem_cache_free(files_cachep, fdt->free_files);
                return;
        }
        if (fdt->max_fds <= NR_OPEN_DEFAULT)
                /* The fdtable was embedded; there is nothing to free. */
                return;
        if (fdset_size <= PAGE_SIZE && fdarray_size <= PAGE_SIZE) {
                kfree(fdt->open_fds);
                kfree(fdt->close_on_exec);
                kfree(fdt->fd);
                kfree(fdt);
        } else {
                fddef = &get_cpu_var(fdtable_defer_list);
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
                /* vmallocs are handled from the workqueue context */
                schedule_work(&fddef->wq);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
}

void free_fdtable(struct fdtable *fdt)
{
        if (fdt->free_files || fdt->max_fds > NR_OPEN_DEFAULT)
                call_rcu(&fdt->rcu, free_fdtable_rcu);
}
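
/*
 * Illustrative note (not part of the original file): how a table is freed
 * depends on its size.  Assuming a 64-bit build with 4 KiB pages, where
 * NR_OPEN_DEFAULT is typically BITS_PER_LONG (64):
 *
 *   max_fds == 64   -> the table is embedded in files_struct; there is
 *                      nothing to free beyond the files_struct itself.
 *   max_fds == 256  -> the fd array is 2 KiB and the fdsets are 32 bytes
 *                      each; all kmalloc'd, so free_fdtable_rcu() kfree()s
 *                      them directly from the RCU callback.
 *   max_fds == 4096 -> the fd array is 32 KiB and therefore vmalloc'd; the
 *                      table is queued on the per-CPU fdtable_defer_list
 *                      and freed later by free_fdtable_work(), because
 *                      vfree() cannot be called from the (softirq) context
 *                      in which RCU callbacks run.
 */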

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
{
        int i;
        int count;

        BUG_ON(nfdt->max_fds < fdt->max_fds);
        /* Copy the existing tables and install the new pointers */

        i = fdt->max_fds / (sizeof(unsigned long) * 8);
        count = (nfdt->max_fds - fdt->max_fds) / 8;

        /*
         * Don't copy the entire array if the current fdset is
         * not yet initialised.
         */
        if (i) {
                memcpy(nfdt->open_fds, fdt->open_fds, fdt->max_fds / 8);
                memcpy(nfdt->close_on_exec, fdt->close_on_exec,
                       fdt->max_fds / 8);
                memset(&nfdt->open_fds->fds_bits[i], 0, count);
                memset(&nfdt->close_on_exec->fds_bits[i], 0, count);
        }

        /*
         * Don't copy/clear the array if we are creating a new
         * fd array for fork().
         */
        if (fdt->max_fds) {
                memcpy(nfdt->fd, fdt->fd,
                       fdt->max_fds * sizeof(struct file *));
                /* clear the remainder of the array */
                memset(&nfdt->fd[fdt->max_fds], 0,
                       (nfdt->max_fds - fdt->max_fds) *
                                        sizeof(struct file *));
        }
}
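
/*
 * Illustrative example (not part of the original file), assuming a 64-bit
 * build: growing from max_fds = 512 to max_fds = 1024 gives
 *
 *   i     = 512 / (sizeof(unsigned long) * 8) = 512 / 64 = 8
 *   count = (1024 - 512) / 8 = 64
 *
 * i.e. the first 8 unsigned longs of each new bitmap (512 bits = 64 bytes)
 * are copied from the old fdsets and the following 64 bytes are zeroed;
 * the fd array copy/clear above works in units of struct file * instead.
 */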

/*
 * Allocate an fdset array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
fd_set * alloc_fdset(int num)
{
        fd_set *new_fdset;
        int size = num / 8;

        if (size <= PAGE_SIZE)
                new_fdset = (fd_set *) kmalloc(size, GFP_KERNEL);
        else
                new_fdset = (fd_set *) vmalloc(size);
        return new_fdset;
}

void free_fdset(fd_set *array, int num)
{
        if (num <= NR_OPEN_DEFAULT) /* Don't free an embedded fdset */
                return;
        else if (num <= 8 * PAGE_SIZE)
                kfree(array);
        else
                vfree(array);
}
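
/*
 * Note (added for clarity): an fdset covering num descriptors occupies
 * num / 8 bytes, so the "num <= 8 * PAGE_SIZE" test above is the same
 * threshold as the "size <= PAGE_SIZE" test in alloc_fdset(): kmalloc'd
 * sets are kfree()d and vmalloc'd sets are vfree()d.
 */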

static struct fdtable *alloc_fdtable(int nr)
{
        struct fdtable *fdt = NULL;
        int nfds = 0;
        fd_set *new_openset = NULL, *new_execset = NULL;
        struct file **new_fds;

        fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
        if (!fdt)
                goto out;

        nfds = NR_OPEN_DEFAULT;
        /*
         * Expand to the max in easy steps, and keep expanding it until
         * we have enough for the requested fd array size.
         */
        do {
#if NR_OPEN_DEFAULT < 256
                if (nfds < 256)
                        nfds = 256;
                else
#endif
                if (nfds < (PAGE_SIZE / sizeof(struct file *)))
                        nfds = PAGE_SIZE / sizeof(struct file *);
                else {
                        nfds = nfds * 2;
                        if (nfds > NR_OPEN)
                                nfds = NR_OPEN;
                }
        } while (nfds <= nr);

        new_openset = alloc_fdset(nfds);
        new_execset = alloc_fdset(nfds);
        if (!new_openset || !new_execset)
                goto out;
        fdt->open_fds = new_openset;
        fdt->close_on_exec = new_execset;
        fdt->max_fds = nfds;

        new_fds = alloc_fd_array(nfds);
        if (!new_fds)
                goto out;
        fdt->fd = new_fds;
        fdt->free_files = NULL;
        return fdt;
out:
        free_fdset(new_openset, nfds);
        free_fdset(new_execset, nfds);
        kfree(fdt);
        return NULL;
}
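
/*
 * Illustrative trace of the sizing loop above (not part of the original
 * file), assuming a 64-bit build with 4 KiB pages, where NR_OPEN_DEFAULT
 * is 64 and PAGE_SIZE / sizeof(struct file *) is 512, for a request of
 * nr = 1000:
 *
 *   nfds = 64   -> bumped to 256 (first easy step); 256 <= 1000, loop again
 *   nfds = 256  -> bumped to 512 (one page of struct file *); still <= 1000
 *   nfds = 512  -> doubled to 1024; 1024 > 1000, so the loop exits
 *
 * The resulting 8 KiB fd array is then vmalloc'd by alloc_fd_array(),
 * while both 128-byte fdsets come from kmalloc().
 */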

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * Check again since another task may have expanded the fd table while
         * we dropped the lock.
         */
        cur_fdt = files_fdtable(files);
        if (nr >= cur_fdt->max_fds) {
                /* Continue as planned */
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                free_fdtable(cur_fdt);
        } else {
                /* Somebody else expanded, so undo our attempt */
                __free_fdtable(new_fdt);
        }
        return 1;
}

/*
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
        struct fdtable *fdt;

        fdt = files_fdtable(files);
        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return 0;
        /* Can we expand? */
        if (nr >= NR_OPEN)
                return -EMFILE;

        /* All good, so we try */
        return expand_fdtable(files, nr);
}
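
/*
 * Illustrative caller sketch (not from this file): the fd-allocation paths
 * call expand_files() with files->file_lock held and retry when it reports
 * that the table was grown (return value 1), since the lock was dropped
 * while the new table was allocated.  Roughly:
 *
 *      spin_lock(&files->file_lock);
 * repeat:
 *      fdt = files_fdtable(files);
 *      fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, start);
 *      error = expand_files(files, fd);
 *      if (error < 0)
 *              goto out;       // e.g. -EMFILE
 *      if (error)
 *              goto repeat;    // table grew; rescan under the lock
 *      ...
 *      spin_unlock(&files->file_lock);
 */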

static void __devinit fdtable_defer_list_init(int cpu)
{
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);

        spin_lock_init(&fddef->lock);
        INIT_WORK(&fddef->wq, free_fdtable_work);
        fddef->next = NULL;
}

void __init files_defer_init(void)
{
        int i;

        for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
}