[PATCH] dup_fd() part 2
[mirror_ubuntu-bionic-kernel.git] / fs / file.c
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays.  By keeping a per-cpu list we avoid having to embed
 * the work_struct in struct fdtable itself, which would mean a
 * 64-byte (i386) growth of this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
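
/*
 * The deferred-free path, end to end: free_fdtable_rcu() below runs
 * once the RCU grace period has elapsed; for a table too big to
 * kfree() it chains the fdtable onto this CPU's fdtable_defer_list
 * and schedules the work.  free_fdtable_work() then walks that chain
 * from process context, where vfree() is safe to call.
 */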

static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}

static inline void free_fdarr(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}

static inline void free_fdset(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}
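
/*
 * Worked example of the kmalloc/vmalloc thresholds above, assuming
 * PAGE_SIZE == 4096 and sizeof(struct file *) == 8 (illustrative
 * values; both are architecture-dependent):
 *
 *   - the fd array stays kmalloc'ed up to 4096 / 8 = 512 descriptors,
 *     i.e. exactly one page of pointers;
 *   - the bitmaps stay kmalloc'ed up to 4096 * 8 / 2 = 16384
 *     descriptors, because open_fds and close_on_exec share a single
 *     allocation of 2 * nr / BITS_PER_BYTE bytes (see alloc_fdtable()
 *     below).
 */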

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;
		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
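
/*
 * For example (illustrative sizes, 8-byte pointers): growing from
 * ofdt->max_fds == 256 to nfdt->max_fds == 512 copies 256 * 8 = 2048
 * bytes of file pointers and zeroes the next 2048, then copies
 * 256 / 8 = 32 bytes of each bitmap and zeroes the following 32.
 */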

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
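
	/*
	 * Worked example, assuming 8-byte pointers and BITS_PER_LONG == 64:
	 * a request for nr == 150 gives 150 / 128 = 1, rounded up to the
	 * next power of two (2), hence 2 * 128 = 256 slots and a 2048-byte
	 * fd array.  With sysctl_nr_open == 1000, the clamp above yields
	 * ((999 | 63) + 1) == 1024, the smallest multiple of BITS_PER_LONG
	 * that is not below sysctl_nr_open.  (Illustrative values only.)
	 */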

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
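
/*
 * A sketch of the expected caller pattern (modeled on the fd
 * allocation loops elsewhere in the kernel; identifiers abbreviated).
 * A return of 1 means file_lock was dropped and the table may have
 * changed, so the caller rescans rather than trusting its old fd:
 *
 *	spin_lock(&files->file_lock);
 * repeat:
 *	fd = find_next_zero_bit(fdt->open_fds->fds_bits, ...);
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;
 *	if (error)
 *		goto repeat;	(may have blocked: rescan)
 *	...
 */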

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}
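
/*
 * E.g. with max_fds == 256 and 64-bit longs: if the highest word of
 * open_fds with any bit set is word 1 (fds 64..127), the loop stops
 * at i == 1 and the function returns (1 + 1) * 64 == 128.  The result
 * is thus rounded up to a whole word, not the exact highest open fd.
 */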

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fds) {
		spin_unlock(&oldf->file_lock);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			free_fdarr(new_fdt);
			free_fdset(new_fdt);
			kfree(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}
		rcu_assign_pointer(newf->fdt, new_fdt);

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * who knows, it may have gained a new, bigger fd table in
		 * the meantime.  We need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
	       old_fdt->open_fds->fds_bits, open_files / 8);
	memcpy(new_fdt->close_on_exec->fds_bits,
	       old_fdt->close_on_exec->fds_bits, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
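
/*
 * The usual caller is copy_files() during fork().  A minimal sketch of
 * that pattern (identifiers abbreviated, not part of this file):
 *
 *	if (clone_flags & CLONE_FILES) {
 *		atomic_inc(&oldf->count);	(share, don't copy)
 *	} else {
 *		newf = dup_fd(oldf, &error);
 *		if (!newf)
 *			return error;		(*errorp was filled in)
 *		tsk->files = newf;
 *	}
 */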

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= (fd_set *)&init_files.close_on_exec_init,
		.open_fds	= (fd_set *)&init_files.open_fds_init,
		.rcu		= RCU_HEAD_INIT,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};