/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
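
/*
 * Illustrative only: proc_nr_files() is wired up from kernel/sysctl.c as the
 * handler behind /proc/sys/fs/file-nr.  The sketch below is kept under
 * "#if 0" so it is never compiled here; the exact field set and the
 * FS_NRFILE constant are assumptions about that era's ctl_table layout and
 * the table name is hypothetical -- see the real entry in kernel/sysctl.c.
 */
#if 0
static struct ctl_table example_fs_table[] = {
        {
                .ctl_name       = FS_NRFILE,
                .procname       = "file-nr",
                .data           = &files_stat,
                .maxlen         = sizeof(files_stat),
                .mode           = 0444,
                .proc_handler   = &proc_nr_files,
        },
        { .ctl_name = 0 }
};
#endif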

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        struct task_struct *tsk;
        static int old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        tsk = current;
        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_uid = tsk->fsuid;
        f->f_gid = tsk->fsgid;
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                       get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

EXPORT_SYMBOL(get_empty_filp);
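
/*
 * Illustrative only (never compiled): the warning in the comment above means
 * that a raw get_empty_filp() user who intends to write must take the mount
 * writer reference itself, so the mnt_drop_write() done from __fput() stays
 * balanced.  The helper name below is hypothetical and the error handling is
 * simplified.
 */
#if 0
static struct file *example_get_write_filp(struct vfsmount *mnt)
{
        struct file *f = get_empty_filp();

        if (!f)
                return NULL;
        if (mnt_want_write(mnt)) {      /* pairs with mnt_drop_write() at __fput() */
                put_filp(f);
                return NULL;
        }
        f->f_mode = FMODE_WRITE;
        return f;
}
#endif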

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file', because it avoids the initialization
 * pitfalls listed for init_file(). This is the
 * preferred interface over calling init_file() directly.
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
                mode_t mode, const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        init_file(file, mnt, dentry, mode, fop);
        return file;
}
EXPORT_SYMBOL(alloc_file);
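
/*
 * Illustrative only (never compiled): a typical in-kernel user hands
 * alloc_file() a mount, a dentry it holds a reference on, an open mode and a
 * file_operations table, and gets back a fully initialised struct file.
 * All names below are hypothetical placeholders, not real callers.
 */
#if 0
static struct file *example_create_file(struct vfsmount *example_mnt,
                                        struct dentry *example_dentry,
                                        const struct file_operations *example_fops)
{
        struct file *file;

        /* the dget()ed reference is transferred to the new file on success */
        file = alloc_file(example_mnt, dget(example_dentry),
                          FMODE_READ, example_fops);
        if (!file)
                dput(example_dentry);
        return file;
}
#endif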

/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to initialize
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly. Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface. It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier. All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
              mode_t mode, const struct file_operations *fop)
{
        int error = 0;
        file->f_path.dentry = dentry;
        file->f_path.mnt = mntget(mnt);
        file->f_mapping = dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
                file_take_write(file);
                error = mnt_want_write(mnt);
                WARN_ON(error);
        }
        return error;
}
EXPORT_SYMBOL(init_file);

void fput(struct file *file)
{
        if (atomic_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the last
 * use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first one called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_kill(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);
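
/*
 * Illustrative only (never compiled): the usual pattern around fget()/fput().
 * The reference taken here pins the file across the operation; dropping it
 * with fput() may trigger __fput() if this was the last reference.  The
 * function name is hypothetical.
 */
#if 0
static loff_t example_file_size(unsigned int fd)
{
        struct file *file = fget(fd);
        loff_t size;

        if (!file)
                return -EBADF;
        size = i_size_read(file->f_path.dentry->d_inode);
        fput(file);
        return size;
}
#endif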

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely((atomic_read(&files->count) == 1))) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference; the file is already being freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
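
/*
 * Illustrative only (never compiled): fget_light() must always be paired with
 * fput_light() (a static inline from <linux/file.h>), passing back the
 * fput_needed flag so the reference is dropped only when one was actually
 * taken.  The function name is hypothetical.
 */
#if 0
static int example_peek_flags(unsigned int fd)
{
        int fput_needed;
        struct file *file = fget_light(fd, &fput_needed);
        int flags;

        if (!file)
                return -EBADF;
        flags = file->f_flags;
        fput_light(file, fput_needed);
        return flags;
}
#endif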


void put_filp(struct file *file)
{
        if (atomic_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}

void file_move(struct file *file, struct list_head *list)
{
        if (!list)
                return;
        file_list_lock();
        list_move(&file->f_u.fu_list, list);
        file_list_unlock();
}

void file_kill(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}

int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        file_list_lock();
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        }
        file_list_unlock();
        return 1; /* Tis' cool bro. */
too_bad:
        file_list_unlock();
        return 0;
}
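
/*
 * Illustrative only (never compiled): the remount path in fs/super.c consults
 * fs_may_remount_ro() before turning a superblock read-only, roughly along
 * these lines (simplified; the function name is hypothetical).
 */
#if 0
static int example_remount_ro(struct super_block *sb)
{
        if (!fs_may_remount_ro(sb))
                return -EBUSY;          /* open writers or pending deletes */
        sb->s_flags |= MS_RDONLY;
        return 0;
}
#endif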

void __init files_init(unsigned long mempages)
{
        int n;
        /* One file with associated inode and dcache is very roughly 1K.
         * By default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}
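
/*
 * Worked example (assuming 4 KiB pages): a machine with 1 GiB of memory has
 * mempages = 262144, so n = (262144 * (4096 / 1024)) / 10 = 104857.  The
 * default file-max therefore comes out to roughly 100k files per gigabyte of
 * RAM, matching the "about 1K per file, 10% of memory" estimate above.
 */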