// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        security_file_free(f);
        if (!(f->f_mode & FMODE_NOACCOUNT))
                percpu_counter_dec(&nr_files);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
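
/*
 * Note (explanatory, not from the original file): the free above is deferred
 * through call_rcu() because lockless fd-table lookups (e.g. the __fget()
 * fast path) dereference a struct file pointer under rcu_read_lock() and
 * only then try to take a reference; waiting out a grace period keeps the
 * memory valid while such a lookup races with the final fput().
 */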
60 | ||
61 | /* | |
62 | * Return the total number of open files in the system | |
63 | */ | |
64 | static long get_nr_files(void) | |
65 | { | |
66 | return percpu_counter_read_positive(&nr_files); | |
67 | } | |
68 | ||
69 | /* | |
70 | * Return the maximum number of open files in the system | |
71 | */ | |
72 | unsigned long get_max_files(void) | |
73 | { | |
74 | return files_stat.max_files; | |
75 | } | |
76 | EXPORT_SYMBOL_GPL(get_max_files); | |
77 | ||
78 | /* | |
79 | * Handle nr_files sysctl | |
80 | */ | |
81 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) | |
82 | int proc_nr_files(struct ctl_table *table, int write, | |
83 | void *buffer, size_t *lenp, loff_t *ppos) | |
84 | { | |
85 | files_stat.nr_files = get_nr_files(); | |
86 | return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | |
87 | } | |
88 | #else | |
89 | int proc_nr_files(struct ctl_table *table, int write, | |
90 | void *buffer, size_t *lenp, loff_t *ppos) | |
91 | { | |
92 | return -ENOSYS; | |
93 | } | |
94 | #endif | |
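
/*
 * For reference, a sketch of how this handler is typically wired up from the
 * sysctl table (the entry lives in kernel/sysctl.c and may differ between
 * kernel versions):
 *
 *      {
 *              .procname       = "file-nr",
 *              .data           = &files_stat,
 *              .maxlen         = sizeof(files_stat),
 *              .mode           = 0444,
 *              .proc_handler   = proc_nr_files,
 *      },
 *
 * Reading /proc/sys/fs/file-nr then reports the refreshed nr_files count
 * alongside max_files.
 */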
95 | ||
96 | static struct file *__alloc_file(int flags, const struct cred *cred) | |
97 | { | |
98 | struct file *f; | |
99 | int error; | |
100 | ||
101 | f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); | |
102 | if (unlikely(!f)) | |
103 | return ERR_PTR(-ENOMEM); | |
104 | ||
105 | f->f_cred = get_cred(cred); | |
106 | error = security_file_alloc(f); | |
107 | if (unlikely(error)) { | |
108 | file_free_rcu(&f->f_u.fu_rcuhead); | |
109 | return ERR_PTR(error); | |
110 | } | |
111 | ||
112 | atomic_long_set(&f->f_count, 1); | |
113 | rwlock_init(&f->f_owner.lock); | |
114 | spin_lock_init(&f->f_lock); | |
115 | mutex_init(&f->f_pos_lock); | |
116 | f->f_flags = flags; | |
117 | f->f_mode = OPEN_FMODE(flags); | |
118 | /* f->f_version: 0 */ | |
119 | ||
120 | return f; | |
121 | } | |
122 | ||
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
        static long old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = __alloc_file(flags, cred);
        if (!IS_ERR(f))
                percpu_counter_inc(&nr_files);

        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}
EXPORT_SYMBOL_GPL(alloc_empty_file);
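
/*
 * The write-access pairing referred to above, in sketch form (illustrative
 * only; the regular open path performs this sequence in do_dentry_open()):
 *
 *      error = get_write_access(inode);
 *      if (!error)
 *              error = __mnt_want_write(mnt);
 *      ...
 *      file->f_mode |= FMODE_WRITER;
 *
 * __fput() later undoes both via put_write_access() and __mnt_drop_write()
 * whenever FMODE_WRITER is set, so skipping the "want" side leaves the
 * mount's writer count imbalanced.
 */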
165 | ||
166 | /* | |
167 | * Variant of alloc_empty_file() that doesn't check and modify nr_files. | |
168 | * | |
169 | * Should not be used unless there's a very good reason to do so. | |
170 | */ | |
171 | struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred) | |
172 | { | |
173 | struct file *f = __alloc_file(flags, cred); | |
174 | ||
175 | if (!IS_ERR(f)) | |
176 | f->f_mode |= FMODE_NOACCOUNT; | |
177 | ||
178 | return f; | |
179 | } | |
180 | ||
181 | /** | |
182 | * alloc_file - allocate and initialize a 'struct file' | |
183 | * | |
184 | * @path: the (dentry, vfsmount) pair for the new file | |
185 | * @flags: O_... flags with which the new file will be opened | |
186 | * @fop: the 'struct file_operations' for the new file | |
187 | */ | |
188 | static struct file *alloc_file(const struct path *path, int flags, | |
189 | const struct file_operations *fop) | |
190 | { | |
191 | struct file *file; | |
192 | ||
193 | file = alloc_empty_file(flags, current_cred()); | |
194 | if (IS_ERR(file)) | |
195 | return file; | |
196 | ||
197 | file->f_path = *path; | |
198 | file->f_inode = path->dentry->d_inode; | |
199 | file->f_mapping = path->dentry->d_inode->i_mapping; | |
200 | file->f_wb_err = filemap_sample_wb_err(file->f_mapping); | |
201 | file->f_sb_err = file_sample_sb_err(file); | |
202 | if ((file->f_mode & FMODE_READ) && | |
203 | likely(fop->read || fop->read_iter)) | |
204 | file->f_mode |= FMODE_CAN_READ; | |
205 | if ((file->f_mode & FMODE_WRITE) && | |
206 | likely(fop->write || fop->write_iter)) | |
207 | file->f_mode |= FMODE_CAN_WRITE; | |
208 | file->f_mode |= FMODE_OPENED; | |
209 | file->f_op = fop; | |
210 | if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) | |
211 | i_readcount_inc(path->dentry->d_inode); | |
212 | return file; | |
213 | } | |
214 | ||
215 | struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt, | |
216 | const char *name, int flags, | |
217 | const struct file_operations *fops) | |
218 | { | |
219 | static const struct dentry_operations anon_ops = { | |
220 | .d_dname = simple_dname | |
221 | }; | |
222 | struct qstr this = QSTR_INIT(name, strlen(name)); | |
223 | struct path path; | |
224 | struct file *file; | |
225 | ||
226 | path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this); | |
227 | if (!path.dentry) | |
228 | return ERR_PTR(-ENOMEM); | |
229 | if (!mnt->mnt_sb->s_d_op) | |
230 | d_set_d_op(path.dentry, &anon_ops); | |
231 | path.mnt = mntget(mnt); | |
232 | d_instantiate(path.dentry, inode); | |
233 | file = alloc_file(&path, flags, fops); | |
234 | if (IS_ERR(file)) { | |
235 | ihold(inode); | |
236 | path_put(&path); | |
237 | } | |
238 | return file; | |
239 | } | |
240 | EXPORT_SYMBOL(alloc_file_pseudo); | |
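
/*
 * Typical use of alloc_file_pseudo(), in sketch form.  The mount, fops and
 * names below are placeholders; in-tree users such as the anon-inode code
 * follow this pattern on their own internal mount:
 *
 *      struct file *file;
 *
 *      file = alloc_file_pseudo(inode, some_internal_mnt, "[example]",
 *                               O_RDWR, &example_fops);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 *      file->private_data = priv;
 *
 * Note that on failure the caller still owns its inode reference: the
 * ihold() above compensates for the dput()->iput() done by path_put().
 */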
241 | ||
242 | struct file *alloc_file_clone(struct file *base, int flags, | |
243 | const struct file_operations *fops) | |
244 | { | |
245 | struct file *f = alloc_file(&base->f_path, flags, fops); | |
246 | if (!IS_ERR(f)) { | |
247 | path_get(&f->f_path); | |
248 | f->f_mapping = base->f_mapping; | |
249 | } | |
250 | return f; | |
251 | } | |
252 | ||
253 | /* the real guts of fput() - releasing the last reference to file | |
254 | */ | |
255 | static void __fput(struct file *file) | |
256 | { | |
257 | struct dentry *dentry = file->f_path.dentry; | |
258 | struct vfsmount *mnt = file->f_path.mnt; | |
259 | struct inode *inode = file->f_inode; | |
260 | fmode_t mode = file->f_mode; | |
261 | ||
262 | if (unlikely(!(file->f_mode & FMODE_OPENED))) | |
263 | goto out; | |
264 | ||
265 | might_sleep(); | |
266 | ||
267 | fsnotify_close(file); | |
268 | /* | |
269 | * The function eventpoll_release() should be the first called | |
270 | * in the file cleanup chain. | |
271 | */ | |
272 | eventpoll_release(file); | |
273 | locks_remove_file(file); | |
274 | ||
275 | ima_file_free(file); | |
276 | if (unlikely(file->f_flags & FASYNC)) { | |
277 | if (file->f_op->fasync) | |
278 | file->f_op->fasync(-1, file, 0); | |
279 | } | |
280 | if (file->f_op->release) | |
281 | file->f_op->release(inode, file); | |
282 | if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL && | |
283 | !(mode & FMODE_PATH))) { | |
284 | cdev_put(inode->i_cdev); | |
285 | } | |
286 | fops_put(file->f_op); | |
287 | put_pid(file->f_owner.pid); | |
288 | if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) | |
289 | i_readcount_dec(inode); | |
290 | if (mode & FMODE_WRITER) { | |
291 | put_write_access(inode); | |
292 | __mnt_drop_write(mnt); | |
293 | } | |
294 | dput(dentry); | |
295 | if (unlikely(mode & FMODE_NEED_UNMOUNT)) | |
296 | dissolve_on_fput(mnt); | |
297 | mntput(mnt); | |
298 | out: | |
299 | file_free(file); | |
300 | } | |
301 | ||
302 | static LLIST_HEAD(delayed_fput_list); | |
303 | static void delayed_fput(struct work_struct *unused) | |
304 | { | |
305 | struct llist_node *node = llist_del_all(&delayed_fput_list); | |
306 | struct file *f, *t; | |
307 | ||
308 | llist_for_each_entry_safe(f, t, node, f_u.fu_llist) | |
309 | __fput(f); | |
310 | } | |
311 | ||
312 | static void ____fput(struct callback_head *work) | |
313 | { | |
314 | __fput(container_of(work, struct file, f_u.fu_rcuhead)); | |
315 | } | |
316 | ||
/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
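
/*
 * For orientation: fput_many() below defers the heavy __fput() work rather
 * than running it inline.  For ordinary process context the file is queued
 * as task_work and the cleanup runs just before the task returns to
 * userspace; from interrupt context or kernel threads, where task_work is
 * not usable, the file is pushed onto delayed_fput_list and handled by the
 * workqueue item declared above.
 */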
334 | ||
335 | void fput_many(struct file *file, unsigned int refs) | |
336 | { | |
337 | if (atomic_long_sub_and_test(refs, &file->f_count)) { | |
338 | struct task_struct *task = current; | |
339 | ||
340 | if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { | |
341 | init_task_work(&file->f_u.fu_rcuhead, ____fput); | |
342 | if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME)) | |
343 | return; | |
344 | /* | |
345 | * After this task has run exit_task_work(), | |
346 | * task_work_add() will fail. Fall through to delayed | |
347 | * fput to avoid leaking *file. | |
348 | */ | |
349 | } | |
350 | ||
351 | if (llist_add(&file->f_u.fu_llist, &delayed_fput_list)) | |
352 | schedule_delayed_work(&delayed_fput_work, 1); | |
353 | } | |
354 | } | |
355 | ||
356 | void fput(struct file *file) | |
357 | { | |
358 | fput_many(file, 1); | |
359 | } | |
360 | ||
/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() in a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                BUG_ON(!(task->flags & PF_KTHREAD));
                __fput(file);
        }
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL_GPL(__fput_sync);

void __init files_init(void)
{
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
        percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K.  By default,
 * do not use more than 10% of our memory for files.
 */
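
/*
 * Worked example (assumed numbers, purely to illustrate the formula below):
 * on a machine with 4 GiB of RAM and 4 KiB pages, nr_pages is about
 * 1,048,576.  Ignoring the reserve, n = (1,048,576 * 4) / 10 ~= 419,430,
 * so file-max defaults to roughly 420k files (and never less than NR_FILE).
 */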
void __init files_maxfiles_init(void)
{
        unsigned long n;
        unsigned long nr_pages = totalram_pages();
        unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

        memreserve = min(memreserve, nr_pages - 1);
        n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}