/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which would add 64 bytes
 * (on i386) to that per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by
	 * the VM.
	 */
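	/*
	 * For example, with 4 KiB pages (where PAGE_ALLOC_COSTLY_ORDER is
	 * 3), the kmalloc() path covers requests up to 32 KiB - a
	 * 4096-entry fd array on 64-bit - before we resort to vmalloc().
	 */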
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);
	BUG_ON(fdt->max_fds <= NR_OPEN_DEFAULT);

	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/*
		 * vfree() can sleep, so vmalloced tables are freed from
		 * workqueue context rather than from this RCU callback.
		 */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the existing fdtable contents into the new, larger one and zero
 * the tail. Called with the files spinlock held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

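	/*
	 * For example, growing max_fds from 256 to 512 on 64-bit copies
	 * 256 * 8 = 2048 bytes of file pointers and zeroes another 2048,
	 * then copies 256 / 8 = 32 bytes of each bitmap and zeroes 32.
	 */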
	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this
	 * fdtable. Allocation steps are keyed to the size of the fdarray,
	 * since it grows far faster than any of the other dynamic data.
	 * We try to fit the fdarray into comfortable page-tuned chunks:
	 * starting at 1024B and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
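	/*
	 * Worked example, on 64-bit (sizeof(struct file *) == 8): a
	 * request for nr = 300 becomes 300 / 128 = 2,
	 * roundup_pow_of_two(3) = 4, then 4 * 128 = 512 slots, i.e. a
	 * one-page (4096-byte) fd array.
	 */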
	/*
	 * Note that this can drive nr *below* what we had passed if
	 * sysctl_nr_open had been set lower between the check in
	 * expand_files() and here. Deal with that in the caller, it's
	 * cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG -
	 * otherwise the bitmap handling below becomes unpleasant, to put
	 * it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
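	/*
	 * That expression rounds sysctl_nr_open up to the next multiple
	 * of BITS_PER_LONG: e.g. with sysctl_nr_open == 100 on 64-bit,
	 * nr becomes (99 | 63) + 1 == 128.
	 */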

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

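	/*
	 * alloc_fdtable() can sleep (GFP_KERNEL kmalloc, possibly
	 * vmalloc), so drop the spinlock around the allocation; the
	 * re-checks below cope with whatever changed in the meantime.
	 */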
	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Extremely unlikely race - sysctl_nr_open decreased between the
	 * check in the caller and alloc_fdtable(). Cheaper to catch it
	 * here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table
	 * while we dropped the lock.
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
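	/*
	 * Example, on 64-bit: with max_fds == 256 and the highest open fd
	 * being 70, bitmap words 3 and 2 are empty but word 1 is not, so
	 * we return (1 + 1) * 64 = 128.
	 */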
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a fresh pointer to its fd
		 * table; it may have grown a new, bigger table while we
		 * dropped the lock, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

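	/*
	 * count_open_files() returns a multiple of BITS_PER_LONG, so
	 * open_files / 8 below copies a whole number of bitmap words.
	 */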
	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, so we could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void close_files(struct files_struct *files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure. But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
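	/*
	 * Walk the open-fds bitmap one long at a time; every set bit is
	 * a still-open descriptor that must be swapped out and closed.
	 */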
	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/* not really needed, since nobody can see us */
		rcu_read_lock();
		fdt = files_fdtable(files);
		rcu_read_unlock();
		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
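	/*
	 * Cap nr_open so that a max_fds worth of file pointers always
	 * fits in a size_t-sized allocation and in an int, rounded down
	 * to a multiple of BITS_PER_LONG for the bitmaps.
	 */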
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
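/*
 * files->next_fd is a lower-bound hint: every descriptor below it is
 * known to be in use, so the bitmap search can start there while still
 * returning the lowest available fd (for start == 0, as POSIX expects).
 */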
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
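
/*
 * Typical caller pattern (an illustrative sketch, error handling
 * abbreviated): reserve a slot, create the struct file, then publish
 * it with fd_install() below. Any struct file producer works where
 * anon_inode_getfile() is shown:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, NULL, 0);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */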

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we let dup2() do it, _really_ bad things
 * will follow.
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(fd_install);