/*
 * fs/proc/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	char *page;
	ssize_t retval = 0;
	int eof = 0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry *dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char *) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL. (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer. Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data. If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed. This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer. Return the number of
			 *    bytes of data placed there. If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start. This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
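
/*
 * Illustrative sketch (not from the original file): a minimal legacy
 * ->read_proc callback following "way 0" of the protocol documented
 * above -- leave *start NULL, format the whole (small) file into the
 * supplied page each call, and return the total length from the start
 * of the buffer; the caller applies the offset itself. The function
 * name and the reported value are hypothetical.
 */
static int __maybe_unused example_read_proc(char *page, char **start,
					    off_t off, int count,
					    int *eof, void *data)
{
	int len = sprintf(page, "value: %d\n", 42);

	*eof = 1;	/* everything has been supplied */
	return len;	/* bytes from the beginning of 'page' */
}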

static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	spin_unlock(&pde->pde_unload_lock);

	rv = __proc_file_read(file, buf, nbytes, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	if (pde->write_proc) {
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			spin_unlock(&pde->pde_unload_lock);
			return rv;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);

		/* FIXME: does this routine need ppos? probably... */
		rv = pde->write_proc(file, buffer, count, pde->data);
		pde_users_dec(pde);
	}
	return rv;
}
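
/*
 * Illustrative sketch (not from the original file): a minimal legacy
 * ->write_proc handler matching the call above. The function name and
 * buffer size are hypothetical; the handler copies at most a small,
 * NUL-terminated chunk of user data and reports how much it consumed.
 */
static int __maybe_unused example_write_proc(struct file *file,
					     const char __user *buffer,
					     unsigned long count, void *data)
{
	char kbuf[64];

	if (count >= sizeof(kbuf))
		count = sizeof(kbuf) - 1;
	if (copy_from_user(kbuf, buffer, count))
		return -EFAULT;
	kbuf[count] = '\0';
	/* ... parse kbuf and update the object behind 'data' here ... */
	return count;
}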


static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;
	switch (orig) {
	case 1:
		offset += file->f_pos;
		/* fallthrough */
	case 0:
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		set_nlink(inode, de->nlink);

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	unsigned int len;

	de = *ret;
	if (!de)
		de = &proc_root;

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}
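
/*
 * Illustrative sketch (not from the original file): how a caller might
 * resolve a path with xlate_proc_name(). A NULL parent means "relative
 * to proc_root"; the function name in this sketch is hypothetical.
 */
static int __maybe_unused example_xlate(void)
{
	struct proc_dir_entry *parent = NULL;
	const char *residual;

	if (xlate_proc_name("tty/driver/serial", &parent, &residual))
		return -ENOENT;
	/* parent now points at /proc/tty/driver, residual == "serial" */
	return 0;
}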

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Allocate an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff; returns 0 on success or a negative errno on failure.
 */
int proc_alloc_inum(unsigned int *inum)
{
	unsigned int i;
	int error;

retry:
	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock_irq(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock_irq(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock_irq(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock_irq(&proc_inum_lock);
		return -ENOSPC;
	}
	*inum = PROC_DYNAMIC_FIRST + i;
	return 0;
}

void proc_free_inum(unsigned int inum)
{
	unsigned long flags;
	spin_lock_irqsave(&proc_inum_lock, flags);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock_irqrestore(&proc_inum_lock, flags);
}
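
/*
 * Illustrative sketch (not from the original file): the expected pairing
 * of proc_alloc_inum() and proc_free_inum() around the lifetime of a
 * dynamically created inode. The function name is hypothetical.
 */
static int __maybe_unused example_inum(void)
{
	unsigned int inum;
	int error;

	error = proc_alloc_inum(&inum);
	if (error)
		return error;
	/* ... use inum as i_ino for the new inode ... */
	proc_free_inum(inum);
	return 0;
}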

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			inode = proc_get_inode(dir->i_sb, de);
			if (!inode)
				return ERR_PTR(-ENOMEM);
			d_set_d_op(dentry, &proc_dentry_operations);
			d_add(dentry, inode);
			return NULL;
		}
	}
	spin_unlock(&proc_subdir_lock);
	return ERR_PTR(-ENOENT);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = file_inode(filp);
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

static int proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp)
{
	struct proc_dir_entry *tmp;
	int ret;

	ret = proc_alloc_inum(&dp->low_ino);
	if (ret)
		return ret;

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					    const char *name,
					    umode_t mode,
					    nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid */
	if (!name || !strlen(name))
		goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent)
		goto out;

	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	spin_lock_init(&ent->pde_unload_lock);
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			    (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

	if (ent) {
		ent->data = kmalloc((ent->size = strlen(dest)) + 1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char *)ent->data, dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_symlink);
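
/*
 * Illustrative sketch (not from the original file): a typical module-side
 * use of proc_symlink(), creating /proc/example_link pointing at "mounts".
 * The entry name and function name are hypothetical.
 */
static void __maybe_unused example_symlink(void)
{
	if (!proc_symlink("example_link", NULL, "mounts"))
		printk(KERN_WARNING "example: proc_symlink failed\n");
}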

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);

struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);
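
/*
 * Illustrative sketch (not from the original file): the legacy pattern
 * paired create_proc_entry() with the ->read_proc hook serviced by
 * proc_file_operations above. All names are hypothetical; new code
 * should prefer proc_create_data() with real file_operations.
 */
static int example_legacy_read(char *page, char **start,
			       off_t off, int count,
			       int *eof, void *data)
{
	*eof = 1;
	return sprintf(page, "legacy\n");
}

static void __maybe_unused example_create_legacy(void)
{
	struct proc_dir_entry *ent;

	ent = create_proc_entry("example_legacy", 0444, NULL);
	if (ent)
		ent->read_proc = example_legacy_read;
}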

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}
EXPORT_SYMBOL(proc_create_data);
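
/*
 * Illustrative sketch (not from the original file): the recommended way
 * to publish a read-only /proc file, pairing proc_create_data() with a
 * seq_file based file_operations (as the comment in __proc_file_read()
 * suggests). All names are hypothetical, and the extra include would
 * normally sit with the other includes at the top of the file.
 */
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello: %s\n", (char *)m->private);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data carries the pointer passed to proc_create_data() */
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void __maybe_unused example_create(void)
{
	proc_create_data("example", 0444, NULL, &example_fops, "world");
}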

static void free_proc_entry(struct proc_dir_entry *de)
{
	proc_free_inum(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p = &(*p)->next) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
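
/*
 * Illustrative sketch (not from the original file): typical module-side
 * pairing of creation and removal, e.g. from a module's init and exit
 * paths. Names are hypothetical; a directory should be emptied before
 * it is removed, otherwise the WARN above fires and entries leak.
 */
static struct proc_dir_entry *example_dir;

static int __maybe_unused example_module_setup(void)
{
	example_dir = proc_mkdir("example_dir", NULL);
	return example_dir ? 0 : -ENOMEM;
}

static void __maybe_unused example_module_teardown(void)
{
	/* Children would have to be removed first; see the WARN above. */
	remove_proc_entry("example_dir", NULL);
}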