// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
8 #include <linux/cache.h>
9 #include <linux/time.h>
10 #include <linux/proc_fs.h>
11 #include <linux/kernel.h>
12 #include <linux/pid_namespace.h>
14 #include <linux/string.h>
15 #include <linux/stat.h>
16 #include <linux/completion.h>
17 #include <linux/poll.h>
18 #include <linux/printk.h>
19 #include <linux/file.h>
20 #include <linux/limits.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/sysctl.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/mount.h>
28 #include <linux/uaccess.h>
32 static void proc_evict_inode(struct inode
*inode
)
34 struct proc_dir_entry
*de
;
35 struct ctl_table_header
*head
;
36 struct proc_inode
*ei
= PROC_I(inode
);
38 truncate_inode_pages_final(&inode
->i_data
);
41 /* Stop tracking associated processes */
43 proc_pid_evict_inode(ei
);
47 /* Let go of any associated proc directory entry */
56 RCU_INIT_POINTER(ei
->sysctl
, NULL
);
57 proc_sys_evict_inode(inode
, head
);
61 static struct kmem_cache
*proc_inode_cachep __ro_after_init
;
62 static struct kmem_cache
*pde_opener_cache __ro_after_init
;
64 static struct inode
*proc_alloc_inode(struct super_block
*sb
)
66 struct proc_inode
*ei
;
68 ei
= kmem_cache_alloc(proc_inode_cachep
, GFP_KERNEL
);
73 ei
->op
.proc_get_link
= NULL
;
76 ei
->sysctl_entry
= NULL
;
77 INIT_HLIST_NODE(&ei
->sibling_inodes
);
79 return &ei
->vfs_inode
;
82 static void proc_free_inode(struct inode
*inode
)
84 kmem_cache_free(proc_inode_cachep
, PROC_I(inode
));
87 static void init_once(void *foo
)
89 struct proc_inode
*ei
= (struct proc_inode
*) foo
;
91 inode_init_once(&ei
->vfs_inode
);
94 void __init
proc_init_kmemcache(void)
96 proc_inode_cachep
= kmem_cache_create("proc_inode_cache",
97 sizeof(struct proc_inode
),
98 0, (SLAB_RECLAIM_ACCOUNT
|
99 SLAB_MEM_SPREAD
|SLAB_ACCOUNT
|
103 kmem_cache_create("pde_opener", sizeof(struct pde_opener
), 0,
104 SLAB_ACCOUNT
|SLAB_PANIC
, NULL
);
105 proc_dir_entry_cache
= kmem_cache_create_usercopy(
106 "proc_dir_entry", SIZEOF_PDE
, 0, SLAB_PANIC
,
107 offsetof(struct proc_dir_entry
, inline_name
),
108 SIZEOF_PDE_INLINE_NAME
, NULL
);
109 BUILD_BUG_ON(sizeof(struct proc_dir_entry
) >= SIZEOF_PDE
);
112 void proc_invalidate_siblings_dcache(struct hlist_head
*inodes
, spinlock_t
*lock
)
115 struct proc_inode
*ei
;
116 struct hlist_node
*node
;
117 struct super_block
*old_sb
= NULL
;
121 struct super_block
*sb
;
122 node
= hlist_first_rcu(inodes
);
125 ei
= hlist_entry(node
, struct proc_inode
, sibling_inodes
);
127 hlist_del_init_rcu(&ei
->sibling_inodes
);
130 inode
= &ei
->vfs_inode
;
132 if ((sb
!= old_sb
) && !atomic_inc_not_zero(&sb
->s_active
))
134 inode
= igrab(inode
);
138 deactivate_super(old_sb
);
141 if (unlikely(!inode
)) {
146 if (S_ISDIR(inode
->i_mode
)) {
147 struct dentry
*dir
= d_find_any_alias(inode
);
153 struct dentry
*dentry
;
154 while ((dentry
= d_find_alias(inode
))) {
155 d_invalidate(dentry
);
165 deactivate_super(old_sb
);
168 static int proc_show_options(struct seq_file
*seq
, struct dentry
*root
)
170 struct super_block
*sb
= root
->d_sb
;
171 struct pid_namespace
*pid
= sb
->s_fs_info
;
173 if (!gid_eq(pid
->pid_gid
, GLOBAL_ROOT_GID
))
174 seq_printf(seq
, ",gid=%u", from_kgid_munged(&init_user_ns
, pid
->pid_gid
));
175 if (pid
->hide_pid
!= HIDEPID_OFF
)
176 seq_printf(seq
, ",hidepid=%u", pid
->hide_pid
);
181 const struct super_operations proc_sops
= {
182 .alloc_inode
= proc_alloc_inode
,
183 .free_inode
= proc_free_inode
,
184 .drop_inode
= generic_delete_inode
,
185 .evict_inode
= proc_evict_inode
,
186 .statfs
= simple_statfs
,
187 .show_options
= proc_show_options
,
/*
 * Rundown bias added to pde->in_use: makes the counter negative so new
 * users are rejected, while existing users can still drain out.
 */
enum {BIAS = -1U<<31};
192 static inline int use_pde(struct proc_dir_entry
*pde
)
194 return likely(atomic_inc_unless_negative(&pde
->in_use
));
197 static void unuse_pde(struct proc_dir_entry
*pde
)
199 if (unlikely(atomic_dec_return(&pde
->in_use
) == BIAS
))
200 complete(pde
->pde_unload_completion
);
203 /* pde is locked on entry, unlocked on exit */
204 static void close_pdeo(struct proc_dir_entry
*pde
, struct pde_opener
*pdeo
)
205 __releases(&pde
->pde_unload_lock
)
208 * close() (proc_reg_release()) can't delete an entry and proceed:
209 * ->release hook needs to be available at the right moment.
211 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
212 * "struct file" needs to be available at the right moment.
214 * Therefore, first process to enter this function does ->release() and
215 * signals its completion to the other process which does nothing.
218 /* somebody else is doing that, just wait */
219 DECLARE_COMPLETION_ONSTACK(c
);
221 spin_unlock(&pde
->pde_unload_lock
);
222 wait_for_completion(&c
);
225 struct completion
*c
;
227 pdeo
->closing
= true;
228 spin_unlock(&pde
->pde_unload_lock
);
230 pde
->proc_ops
->proc_release(file_inode(file
), file
);
231 spin_lock(&pde
->pde_unload_lock
);
232 /* After ->release. */
235 spin_unlock(&pde
->pde_unload_lock
);
238 kmem_cache_free(pde_opener_cache
, pdeo
);
242 void proc_entry_rundown(struct proc_dir_entry
*de
)
244 DECLARE_COMPLETION_ONSTACK(c
);
245 /* Wait until all existing callers into module are done. */
246 de
->pde_unload_completion
= &c
;
247 if (atomic_add_return(BIAS
, &de
->in_use
) != BIAS
)
248 wait_for_completion(&c
);
250 /* ->pde_openers list can't grow from now on. */
252 spin_lock(&de
->pde_unload_lock
);
253 while (!list_empty(&de
->pde_openers
)) {
254 struct pde_opener
*pdeo
;
255 pdeo
= list_first_entry(&de
->pde_openers
, struct pde_opener
, lh
);
256 close_pdeo(de
, pdeo
);
257 spin_lock(&de
->pde_unload_lock
);
259 spin_unlock(&de
->pde_unload_lock
);
262 static loff_t
proc_reg_llseek(struct file
*file
, loff_t offset
, int whence
)
264 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
267 typeof_member(struct proc_ops
, proc_lseek
) lseek
;
269 lseek
= pde
->proc_ops
->proc_lseek
;
271 lseek
= default_llseek
;
272 rv
= lseek(file
, offset
, whence
);
278 static ssize_t
proc_reg_read(struct file
*file
, char __user
*buf
, size_t count
, loff_t
*ppos
)
280 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
283 typeof_member(struct proc_ops
, proc_read
) read
;
285 read
= pde
->proc_ops
->proc_read
;
287 rv
= read(file
, buf
, count
, ppos
);
293 static ssize_t
proc_reg_write(struct file
*file
, const char __user
*buf
, size_t count
, loff_t
*ppos
)
295 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
298 typeof_member(struct proc_ops
, proc_write
) write
;
300 write
= pde
->proc_ops
->proc_write
;
302 rv
= write(file
, buf
, count
, ppos
);
308 static __poll_t
proc_reg_poll(struct file
*file
, struct poll_table_struct
*pts
)
310 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
311 __poll_t rv
= DEFAULT_POLLMASK
;
313 typeof_member(struct proc_ops
, proc_poll
) poll
;
315 poll
= pde
->proc_ops
->proc_poll
;
317 rv
= poll(file
, pts
);
323 static long proc_reg_unlocked_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
325 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
328 typeof_member(struct proc_ops
, proc_ioctl
) ioctl
;
330 ioctl
= pde
->proc_ops
->proc_ioctl
;
332 rv
= ioctl(file
, cmd
, arg
);
#ifdef CONFIG_COMPAT
/* compat ioctl wrapper: -ENOTTY unless ->proc_compat_ioctl exists. */
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;

	if (use_pde(pde)) {
		typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;

		compat_ioctl = pde->proc_ops->proc_compat_ioctl;
		if (compat_ioctl)
			rv = compat_ioctl(file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}
#endif
355 static int proc_reg_mmap(struct file
*file
, struct vm_area_struct
*vma
)
357 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
360 typeof_member(struct proc_ops
, proc_mmap
) mmap
;
362 mmap
= pde
->proc_ops
->proc_mmap
;
364 rv
= mmap(file
, vma
);
371 proc_reg_get_unmapped_area(struct file
*file
, unsigned long orig_addr
,
372 unsigned long len
, unsigned long pgoff
,
375 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
376 unsigned long rv
= -EIO
;
379 typeof_member(struct proc_ops
, proc_get_unmapped_area
) get_area
;
381 get_area
= pde
->proc_ops
->proc_get_unmapped_area
;
384 get_area
= current
->mm
->get_unmapped_area
;
388 rv
= get_area(file
, orig_addr
, len
, pgoff
, flags
);
396 static int proc_reg_open(struct inode
*inode
, struct file
*file
)
398 struct proc_dir_entry
*pde
= PDE(inode
);
400 typeof_member(struct proc_ops
, proc_open
) open
;
401 typeof_member(struct proc_ops
, proc_release
) release
;
402 struct pde_opener
*pdeo
;
406 * 1) PDE's ->release hook will be called no matter what
407 * either normally by close()/->release, or forcefully by
408 * rmmod/remove_proc_entry.
410 * 2) rmmod isn't blocked by opening file in /proc and sitting on
411 * the descriptor (including "rmmod foo </proc/foo" scenario).
413 * Save every "struct file" with custom ->release hook.
418 release
= pde
->proc_ops
->proc_release
;
420 pdeo
= kmem_cache_alloc(pde_opener_cache
, GFP_KERNEL
);
427 open
= pde
->proc_ops
->proc_open
;
429 rv
= open(inode
, file
);
433 /* To know what to release. */
435 pdeo
->closing
= false;
437 spin_lock(&pde
->pde_unload_lock
);
438 list_add(&pdeo
->lh
, &pde
->pde_openers
);
439 spin_unlock(&pde
->pde_unload_lock
);
441 kmem_cache_free(pde_opener_cache
, pdeo
);
449 static int proc_reg_release(struct inode
*inode
, struct file
*file
)
451 struct proc_dir_entry
*pde
= PDE(inode
);
452 struct pde_opener
*pdeo
;
453 spin_lock(&pde
->pde_unload_lock
);
454 list_for_each_entry(pdeo
, &pde
->pde_openers
, lh
) {
455 if (pdeo
->file
== file
) {
456 close_pdeo(pde
, pdeo
);
460 spin_unlock(&pde
->pde_unload_lock
);
464 static const struct file_operations proc_reg_file_ops
= {
465 .llseek
= proc_reg_llseek
,
466 .read
= proc_reg_read
,
467 .write
= proc_reg_write
,
468 .poll
= proc_reg_poll
,
469 .unlocked_ioctl
= proc_reg_unlocked_ioctl
,
471 .compat_ioctl
= proc_reg_compat_ioctl
,
473 .mmap
= proc_reg_mmap
,
474 .get_unmapped_area
= proc_reg_get_unmapped_area
,
475 .open
= proc_reg_open
,
476 .release
= proc_reg_release
,
480 static const struct file_operations proc_reg_file_ops_no_compat
= {
481 .llseek
= proc_reg_llseek
,
482 .read
= proc_reg_read
,
483 .write
= proc_reg_write
,
484 .poll
= proc_reg_poll
,
485 .unlocked_ioctl
= proc_reg_unlocked_ioctl
,
486 .mmap
= proc_reg_mmap
,
487 .get_unmapped_area
= proc_reg_get_unmapped_area
,
488 .open
= proc_reg_open
,
489 .release
= proc_reg_release
,
/* Delayed-call callback: drop the use reference taken by proc_get_link. */
static void proc_put_link(void *p)
{
	unuse_pde(p);
}
498 static const char *proc_get_link(struct dentry
*dentry
,
500 struct delayed_call
*done
)
502 struct proc_dir_entry
*pde
= PDE(inode
);
504 return ERR_PTR(-EINVAL
);
505 set_delayed_call(done
, proc_put_link
, pde
);
509 const struct inode_operations proc_link_inode_operations
= {
510 .get_link
= proc_get_link
,
513 struct inode
*proc_get_inode(struct super_block
*sb
, struct proc_dir_entry
*de
)
515 struct inode
*inode
= new_inode_pseudo(sb
);
518 inode
->i_ino
= de
->low_ino
;
519 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= current_time(inode
);
520 PROC_I(inode
)->pde
= de
;
522 if (is_empty_pde(de
)) {
523 make_empty_dir_inode(inode
);
527 inode
->i_mode
= de
->mode
;
528 inode
->i_uid
= de
->uid
;
529 inode
->i_gid
= de
->gid
;
532 inode
->i_size
= de
->size
;
534 set_nlink(inode
, de
->nlink
);
536 if (S_ISREG(inode
->i_mode
)) {
537 inode
->i_op
= de
->proc_iops
;
538 inode
->i_fop
= &proc_reg_file_ops
;
540 if (!de
->proc_ops
->proc_compat_ioctl
) {
541 inode
->i_fop
= &proc_reg_file_ops_no_compat
;
544 } else if (S_ISDIR(inode
->i_mode
)) {
545 inode
->i_op
= de
->proc_iops
;
546 inode
->i_fop
= de
->proc_dir_ops
;
547 } else if (S_ISLNK(inode
->i_mode
)) {
548 inode
->i_op
= de
->proc_iops
;