/*
 * fs/proc/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	char *page;
	ssize_t retval = 0;
	int eof = 0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry *dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char *) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
				n = PAGE_SIZE;
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
				n = PAGE_SIZE;
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				pr_warn("proc_file_read: count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff))	/* buffer overflow? */
				n = PAGE_SIZE - startoff;
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
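
/*
 * A minimal sketch of a read_proc callback following "method 0" from the
 * comment in __proc_file_read() above: the whole (small) file is formatted
 * from offset 0, *start is left NULL, *peof is set, and the total length is
 * returned so the caller can apply the requested offset itself.  The name
 * example_read_proc and the assumption that ->data points to a NUL-terminated
 * string are hypothetical; the block is guarded by #if 0 as illustration only.
 */
#if 0
static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *peof, void *data)
{
	/* Regenerate the entire contents on every call (method 0). */
	int len = scnprintf(page, count, "example: %s\n", (const char *)data);

	/* Everything has been supplied, so signal end-of-file. */
	*peof = 1;

	/* *start stays NULL; return the byte count from the buffer start. */
	return len;
}
#endif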

static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	spin_unlock(&pde->pde_unload_lock);

	rv = __proc_file_read(file, buf, nbytes, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	if (pde->write_proc) {
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			spin_unlock(&pde->pde_unload_lock);
			return rv;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);

		/* FIXME: does this routine need ppos?  probably... */
		rv = pde->write_proc(file, buffer, count, pde->data);
		pde_users_dec(pde);
	}
	return rv;
}

static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;
	switch (orig) {
	case 1: /* SEEK_CUR: offset is relative to the current position */
		offset += file->f_pos;
		/* fallthrough */
	case 0: /* SEEK_SET: offset is absolute */
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		set_nlink(inode, de->nlink);

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", returning the
 * struct proc_dir_entry for "/proc/tty/driver" in *ret and the remaining
 * component, "serial", in *residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	unsigned int len;

	de = *ret;
	if (!de)
		de = &proc_root;

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Allocate an inode number between PROC_DYNAMIC_FIRST and 0xffffffff,
 * storing it in *inum.  Returns 0 on success or a negative errno on
 * failure.
 */
int proc_alloc_inum(unsigned int *inum)
{
	unsigned int i;
	int error;

retry:
	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock_irq(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock_irq(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock_irq(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock_irq(&proc_inum_lock);
		return -ENOSPC;
	}
	*inum = PROC_DYNAMIC_FIRST + i;
	return 0;
}

void proc_free_inum(unsigned int inum)
{
	unsigned long flags;
	spin_lock_irqsave(&proc_inum_lock, flags);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock_irqrestore(&proc_inum_lock, flags);
}

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries.  This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			inode = proc_get_inode(dir->i_sb, de);
			if (!inode)
				return ERR_PTR(-ENOMEM);
			d_set_d_op(dentry, &proc_dentry_operations);
			d_add(dentry, inode);
			return NULL;
		}
	}
	spin_unlock(&proc_subdir_lock);
	return ERR_PTR(-ENOENT);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = file_inode(filp);
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

static int proc_register(struct proc_dir_entry *dir, struct proc_dir_entry *dp)
{
	struct proc_dir_entry *tmp;
	int ret;

	ret = proc_alloc_inum(&dp->low_ino);
	if (ret)
		return ret;

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					    const char *name,
					    umode_t mode,
					    nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid */
	if (!name || !strlen(name))
		goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent)
		goto out;

	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	spin_lock_init(&ent->pde_unload_lock);
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			    (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

	if (ent) {
		ent->data = kmalloc((ent->size = strlen(dest)) + 1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char *)ent->data, dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_symlink);

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);

struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}
EXPORT_SYMBOL(proc_create_data);
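
/*
 * A minimal sketch of the seq_file-based pattern recommended by the comment
 * in __proc_file_read(): a read-only file created with proc_create_data(),
 * whose ->data cookie is recovered in the open routine.  The example_* names
 * are hypothetical, and PDE(inode)->data assumes a kernel of this vintage
 * (newer kernels use PDE_DATA()); guarded by #if 0 as illustration only.
 */
#if 0
#include <linux/seq_file.h>

static int example_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example: %s\n", (const char *)m->private);
	return 0;
}

static int example_seq_open(struct inode *inode, struct file *file)
{
	/* Hand the proc_create_data() cookie to the seq_file as ->private. */
	return single_open(file, example_seq_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Typically called from module init to create /proc/example_file. */
static struct proc_dir_entry *example_create(void)
{
	return proc_create_data("example_file", S_IRUGO, NULL,
				&example_proc_fops, "hello");
}
#endif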

static void free_proc_entry(struct proc_dir_entry *de)
{
	proc_free_inum(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p = &(*p)->next) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, "%s: removing non-empty directory "
			 "'%s/%s', leaking at least '%s'\n", __func__,
			 de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
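
/*
 * A minimal sketch of pairing the creation helpers above with
 * remove_proc_entry() in a module: entries are removed child-first, then the
 * parent directory.  The example_* names (and the reuse of example_proc_fops
 * from the sketch above) are hypothetical; guarded by #if 0 as illustration
 * only.
 */
#if 0
static struct proc_dir_entry *example_dir;

static int __init example_proc_init(void)
{
	example_dir = proc_mkdir("example_dir", NULL);
	if (!example_dir)
		return -ENOMEM;

	if (!proc_create_data("status", S_IRUGO, example_dir,
			      &example_proc_fops, "hello")) {
		remove_proc_entry("example_dir", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_proc_exit(void)
{
	/* Remove the file first, then its (now empty) parent directory. */
	remove_proc_entry("status", example_dir);
	remove_proc_entry("example_dir", NULL);
}

module_init(example_proc_init);
module_exit(example_proc_exit);
#endif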