/*
 * fs/proc/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	char *page;
	ssize_t retval=0;
	int eof=0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)	/* end of file */
			break;
		if (n < 0) {	/* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
				n = PAGE_SIZE;
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE)	/* Apparent buffer overflow */
				n = PAGE_SIZE;
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				pr_warn("proc_file_read: count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff))	/* buffer overflow? */
				n = PAGE_SIZE - startoff;
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}

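/*
 * Illustrative sketch (not part of this file): a minimal legacy
 * ->read_proc callback of the kind documented above, using the
 * "leave *start == NULL" convention for data that fits in one page.
 * The names example_read_proc and the int cookie are made up here.
 */
#if 0
static int example_read_proc(char *buffer, char **start, off_t offset,
			     int count, int *peof, void *data)
{
	int *value = data;	/* ->data cookie supplied at creation time */
	int len = sprintf(buffer, "value: %d\n", *value);

	*peof = 1;		/* everything fits in a single buffer */
	return len;		/* bytes from the start of the buffer */
}
#endif
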
static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	spin_unlock(&pde->pde_unload_lock);

	rv = __proc_file_read(file, buf, nbytes, ppos);

	pde_users_dec(pde);
	return rv;
}

static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;
	switch (orig) {
	case 1:
		offset += file->f_pos;
		/* fallthrough */
	case 0:
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
};

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		set_nlink(inode, de->nlink);

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial": it looks up
 * the struct proc_dir_entry for "/proc/tty/driver" and returns it in
 * *ret, leaving the final component ("serial") in *residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char		*cp = name, *next;
	struct proc_dir_entry	*de;
	unsigned int		len;

	de = *ret;
	if (!de)
		de = &proc_root;

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
int proc_alloc_inum(unsigned int *inum)
{
	unsigned int i;
	int error;

retry:
	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock_irq(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock_irq(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock_irq(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock_irq(&proc_inum_lock);
		return -ENOSPC;
	}
	*inum = PROC_DYNAMIC_FIRST + i;
	return 0;
}

void proc_free_inum(unsigned int inum)
{
	unsigned long flags;
	spin_lock_irqsave(&proc_inum_lock, flags);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock_irqrestore(&proc_inum_lock, flags);
}

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE_DATA(dentry->d_inode));
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries.  This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry * dentry)
{
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			inode = proc_get_inode(dir->i_sb, de);
			if (!inode)
				return ERR_PTR(-ENOMEM);
			d_set_d_op(dentry, &proc_dentry_operations);
			d_add(dentry, inode);
			return NULL;
		}
	}
	spin_unlock(&proc_subdir_lock);
	return ERR_PTR(-ENOENT);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = file_inode(filp);
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek			= generic_file_llseek,
	.read			= generic_read_dir,
	.readdir		= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
	struct proc_dir_entry *tmp;
	int ret;

	ret = proc_alloc_inum(&dp->low_ino);
	if (ret)
		return ret;

	if (S_ISDIR(dp->mode)) {
		dp->proc_fops = &proc_dir_operations;
		dp->proc_iops = &proc_dir_inode_operations;
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		dp->proc_iops = &proc_file_inode_operations;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					  const char *name,
					  umode_t mode,
					  nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid */
	if (!name || !strlen(name))
		goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent)
		goto out;

	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	spin_lock_init(&ent->pde_unload_lock);
	INIT_LIST_HEAD(&ent->pde_openers);
out:
	return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			  (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO), 1);

	if (ent) {
		ent->data = kmalloc((ent->size = strlen(dest)) + 1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char *)ent->data, dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_symlink);
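
/*
 * Illustrative usage (a sketch, not part of this file): creating
 * "/proc/example_link" pointing at another proc path.  The names
 * "example_link" and "driver/example" are made up for illustration.
 */
#if 0
static int __init example_symlink_init(void)
{
	if (!proc_symlink("example_link", NULL, "driver/example"))
		return -ENOMEM;
	return 0;
}
#endif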

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);
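
/*
 * Illustrative usage (a sketch, not part of this file): creating a
 * directory such as "/proc/driver/example".  A name containing '/'
 * is resolved against existing entries via xlate_proc_name() above.
 * example_dir and the "driver/example" path are made-up names.
 */
#if 0
static struct proc_dir_entry *example_dir;

static int __init example_mkdir_init(void)
{
	example_dir = proc_mkdir("driver/example", NULL);
	if (!example_dir)
		return -ENOMEM;
	return 0;
}
#endif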

struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	if ((mode & S_IFMT) == 0)
		mode |= S_IFREG;

	if (!S_ISREG(mode)) {
		WARN_ON(1);	/* use proc_mkdir(), damnit */
		return NULL;
	}

	if ((mode & S_IALLUGO) == 0)
		mode |= S_IRUGO;

	ent = __proc_create(&parent, name, mode, 1);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);
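
/*
 * Illustrative usage (a sketch, not part of this file): the legacy way
 * of wiring up a ->read_proc handler such as the example_read_proc()
 * sketched earlier, by poking the returned entry directly.  New code
 * should prefer proc_create_data() below.  "example" and example_value
 * are made-up names.
 */
#if 0
static int example_value = 42;

static int __init example_legacy_init(void)
{
	struct proc_dir_entry *pde;

	pde = create_proc_entry("example", 0444, NULL);
	if (!pde)
		return -ENOMEM;
	pde->read_proc = example_read_proc;
	pde->data = &example_value;
	return 0;
}
#endif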

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	if ((mode & S_IFMT) == 0)
		mode |= S_IFREG;

	if (!S_ISREG(mode)) {
		WARN_ON(1);	/* use proc_mkdir() */
		return NULL;
	}

	if ((mode & S_IALLUGO) == 0)
		mode |= S_IRUGO;
	pde = __proc_create(&parent, name, mode, 1);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}
EXPORT_SYMBOL(proc_create_data);
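
/*
 * Illustrative usage (a sketch, not part of this file, and assuming
 * <linux/seq_file.h>): the preferred modern pattern, pairing
 * proc_create_data() with seq_file and fetching the per-entry pointer
 * back with PDE_DATA() in the ->open handler.  The example_seq_* and
 * example_state names are made up.
 */
#if 0
static int example_seq_show(struct seq_file *m, void *v)
{
	int *state = m->private;	/* pointer passed to proc_create_data() */

	seq_printf(m, "state: %d\n", *state);
	return 0;
}

static int example_seq_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the ->data cookie stored by proc_create_data() */
	return single_open(file, example_seq_show, PDE_DATA(inode));
}

static const struct file_operations example_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= example_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int example_state;

static int __init example_seq_init(void)
{
	if (!proc_create_data("example_seq", 0444, NULL,
			      &example_seq_fops, &example_state))
		return -ENOMEM;
	return 0;
}
#endif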

static void free_proc_entry(struct proc_dir_entry *de)
{
	proc_free_inum(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}

static void entry_rundown(struct proc_dir_entry *de)
{
	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p = &(*p)->next) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	entry_rundown(de);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, "%s: removing non-empty directory "
			 "'%s/%s', leaking at least '%s'\n", __func__,
			 de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);

int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *root = NULL, *de, *next;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return -ENOENT;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p = &(*p)->next) {
		if (proc_match(len, fn, *p)) {
			root = *p;
			*p = root->next;
			root->next = NULL;
			break;
		}
	}
	if (!root) {
		spin_unlock(&proc_subdir_lock);
		return -ENOENT;
	}
	de = root;
	while (1) {
		next = de->subdir;
		if (next) {
			de->subdir = next->next;
			next->next = NULL;
			de = next;
			continue;
		}
		spin_unlock(&proc_subdir_lock);

		entry_rundown(de);
		next = de->parent;
		if (S_ISDIR(de->mode))
			next->nlink--;
		de->nlink = 0;
		if (de == root)
			break;
		pde_put(de);

		spin_lock(&proc_subdir_lock);
		de = next;
	}
	pde_put(root);
	return 0;
}
EXPORT_SYMBOL(remove_proc_subtree);
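
/*
 * Illustrative usage (a sketch, not part of this file): tearing down the
 * entries created in the earlier sketches from a module exit path.
 * remove_proc_entry() removes a single entry; remove_proc_subtree()
 * removes a directory together with everything below it.
 */
#if 0
static void __exit example_exit(void)
{
	remove_proc_entry("example_seq", NULL);
	remove_proc_entry("example_link", NULL);
	remove_proc_subtree("driver/example", NULL);
}
#endif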