proc: warn on non-existing proc entries
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / proc / generic.c
1 /*
2 * proc/fs/generic.c --- generic routines for the proc-fs
3 *
4 * This file contains generic proc-fs routines for handling
5 * directories and files.
6 *
7 * Copyright (C) 1991, 1992 Linus Torvalds.
8 * Copyright (C) 1997 Theodore Ts'o
9 */
10
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>
24
25 #include "internal.h"
26
/* Serialises all walks and updates of the proc_dir_entry sibling lists. */
DEFINE_SPINLOCK(proc_subdir_lock);
28
29 static int proc_match(int len, const char *name, struct proc_dir_entry *de)
30 {
31 if (de->namelen != len)
32 return 0;
33 return !memcmp(name, de->name, len);
34 }
35
36 /* buffer size is one page but our output routines use some slack for overruns */
37 #define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
38
/*
 * Legacy read path for entries that supply a ->read_proc() callback.
 * Repeatedly calls the callback for up to PROC_BLOCK_SIZE bytes at a
 * time, interprets the callback's "start" convention (see the big
 * comment below), and copies the result to userspace.  Returns the
 * number of bytes transferred, 0 at EOF, or a negative errno.
 */
static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	char *page;
	ssize_t retval = 0;
	int eof = 0;
	ssize_t n, count;
	char *start;
	struct proc_dir_entry *dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	/* one page of scratch space for the callback to fill */
	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			/* convention 0: data sits at *ppos inside the page */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			/* convention 1: *start is a byte count, data at page start */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			/* convention 2: data begins at *start within the page */
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		/* convention 1 advances by *start (a count), others by n */
		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
184
185 static ssize_t
186 proc_file_read(struct file *file, char __user *buf, size_t nbytes,
187 loff_t *ppos)
188 {
189 struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
190 ssize_t rv = -EIO;
191
192 spin_lock(&pde->pde_unload_lock);
193 if (!pde->proc_fops) {
194 spin_unlock(&pde->pde_unload_lock);
195 return rv;
196 }
197 pde->pde_users++;
198 spin_unlock(&pde->pde_unload_lock);
199
200 rv = __proc_file_read(file, buf, nbytes, ppos);
201
202 pde_users_dec(pde);
203 return rv;
204 }
205
/*
 * Legacy write path for entries with a ->write_proc() callback.
 * The pde_users count pins the owning module for the duration of the
 * callback; a NULL ->proc_fops means the entry is mid-removal and no
 * new callers may enter (see remove_proc_entry()).
 */
static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;

	if (pde->write_proc) {
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			/* entry is being removed -- refuse new writers */
			spin_unlock(&pde->pde_unload_lock);
			return rv;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);

		/* FIXME: does this routine need ppos?  probably... */
		rv = pde->write_proc(file, buffer, count, pde->data);
		pde_users_dec(pde);
	}
	return rv;
}
228
229
230 static loff_t
231 proc_file_lseek(struct file *file, loff_t offset, int orig)
232 {
233 loff_t retval = -EINVAL;
234 switch (orig) {
235 case 1:
236 offset += file->f_pos;
237 /* fallthrough */
238 case 0:
239 if (offset < 0 || offset > MAX_NON_LFS)
240 break;
241 file->f_pos = retval = offset;
242 }
243 return retval;
244 }
245
/* Default file ops for legacy read_proc/write_proc style entries. */
static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};
251
252 static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
253 {
254 struct inode *inode = dentry->d_inode;
255 struct proc_dir_entry *de = PDE(inode);
256 int error;
257
258 error = inode_change_ok(inode, iattr);
259 if (error)
260 goto out;
261
262 error = inode_setattr(inode, iattr);
263 if (error)
264 goto out;
265
266 de->uid = inode->i_uid;
267 de->gid = inode->i_gid;
268 de->mode = inode->i_mode;
269 out:
270 return error;
271 }
272
273 static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
274 struct kstat *stat)
275 {
276 struct inode *inode = dentry->d_inode;
277 struct proc_dir_entry *de = PROC_I(inode)->pde;
278 if (de && de->nlink)
279 inode->i_nlink = de->nlink;
280
281 generic_fillattr(inode, stat);
282 return 0;
283 }
284
/* Inode ops for regular proc files: only attribute changes are special. */
static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};
288
/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 *
 * Caller must hold proc_subdir_lock (see xlate_proc_name()).
 * Returns 0 on success, or -ENOENT (with a WARN) if an intermediate
 * path component does not exist.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char *cp = name, *next;
	struct proc_dir_entry *de;
	int len;

	de = *ret;
	if (!de)
		de = &proc_root;	/* NULL parent means start at /proc */

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		/* match the component [cp, next) against de's children */
		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}
325
/* Locked wrapper around __xlate_proc_name(). */
static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}
336
/* Allocator for dynamic inode numbers; IDs are offset by PROC_DYNAMIC_FIRST. */
static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U
341
342 /*
343 * Return an inode number between PROC_DYNAMIC_FIRST and
344 * 0xffffffff, or zero on failure.
345 *
346 * Current inode allocations in the proc-fs (hex-numbers):
347 *
348 * 00000000 reserved
349 * 00000001-00000fff static entries (goners)
350 * 001 root-ino
351 *
352 * 00001000-00001fff unused
353 * 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
354 * 80000000-efffffff unused
355 * f0000000-ffffffff dynamic entries
356 *
357 * Goal:
358 * Once we split the thing into several virtual filesystems,
359 * we will get rid of magical ranges (and this comment, BTW).
360 */
/*
 * Allocate a dynamic inode number from the IDA.  Returns a value in
 * [PROC_DYNAMIC_FIRST, 0xffffffff], or 0 on failure (allocation
 * failure or ID space exhausted).
 */
static unsigned int get_inode_number(void)
{
	unsigned int i;
	int error;

retry:
	/* preload memory without the lock; -EAGAIN means we raced, retry */
	if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
		return 0;

	spin_lock(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return 0;

	/* adding PROC_DYNAMIC_FIRST would wrap: give the ID back and fail */
	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock(&proc_inum_lock);
		return 0;
	}
	return PROC_DYNAMIC_FIRST + i;
}
386
/* Return a dynamic inode number obtained from get_inode_number(). */
static void release_inode_number(unsigned int inum)
{
	spin_lock(&proc_inum_lock);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock(&proc_inum_lock);
}
393
/* ->follow_link: the target string was stored in pde->data by proc_symlink(). */
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;	/* no cookie needed for put_link */
}
399
/* Inode ops for proc symlinks (see proc_symlink()). */
static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};
404
/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(struct dentry * dentry)
{
	/* always drop: proc dentries are not worth caching once unused */
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};
420
/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 *
 * On a name match we take a pde reference before dropping
 * proc_subdir_lock so the entry cannot be freed while we build its
 * inode; the reference is dropped on the failure path below.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = NULL;
	int error = -ENOENT;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			unsigned int ino;

			ino = de->low_ino;
			pde_get(de);	/* pin before dropping the lock */
			spin_unlock(&proc_subdir_lock);
			error = -EINVAL;
			inode = proc_get_inode(dir->i_sb, ino, de);
			goto out_unlock;
		}
	}
	spin_unlock(&proc_subdir_lock);
out_unlock:

	if (inode) {
		dentry->d_op = &proc_dentry_operations;
		d_add(dentry, inode);
		return NULL;
	}
	/* proc_get_inode() failed: drop the reference taken above */
	if (de)
		pde_put(de);
	return ERR_PTR(error);
}
458
/* ->lookup for proc directories: search PDE(dir)'s child list. */
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}
464
/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 *
 * f_pos convention: 0 is ".", 1 is "..", 2+ index into @de's child
 * list.  proc_subdir_lock is dropped around each filldir() call
 * (which may fault in userspace), with the current child pinned by a
 * pde reference.
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
	case 0:
		/* emit "." */
		if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	case 1:
		/* emit ".." */
		if (filldir(dirent, "..", 2, i,
			    parent_ino(filp->f_path.dentry),
			    DT_DIR) < 0)
			goto out;
		i++;
		filp->f_pos++;
		/* fall through */
	default:
		spin_lock(&proc_subdir_lock);
		de = de->subdir;
		i -= 2;		/* f_pos == 2 maps to the first child */
		/* skip entries already emitted by earlier calls */
		for (;;) {
			if (!de) {
				ret = 1;
				spin_unlock(&proc_subdir_lock);
				goto out;
			}
			if (!i)
				break;
			de = de->next;
			i--;
		}

		do {
			struct proc_dir_entry *next;

			/* filldir passes info to user space */
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			if (filldir(dirent, de->name, de->namelen, filp->f_pos,
				    de->low_ino, de->mode >> 12) < 0) {
				pde_put(de);
				goto out;
			}
			spin_lock(&proc_subdir_lock);
			filp->f_pos++;
			next = de->next;
			pde_put(de);
			de = next;
		} while (de);
		spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}
538
/* ->readdir for proc directories: walk PDE(inode)'s child list. */
int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}
545
/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= proc_readdir,
};
556
/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};
565
/*
 * Give @dp a dynamic inode number and default ops for its file type,
 * then link it into @dir's child list.  Returns 0 on success or
 * -EAGAIN if no inode number was available.  A duplicate name only
 * WARNs; the new entry is registered anyway and, being at the head of
 * the list, shadows the old one in lookups.
 */
static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
	unsigned int i;
	struct proc_dir_entry *tmp;

	i = get_inode_number();
	if (i == 0)
		return -EAGAIN;
	dp->low_ino = i;

	/* fill in default ops for whatever the caller left NULL */
	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;	/* new subdirectory adds a ".." link to parent */
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	/* insert at the head of the sibling list */
	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}
608
609 static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
610 const char *name,
611 mode_t mode,
612 nlink_t nlink)
613 {
614 struct proc_dir_entry *ent = NULL;
615 const char *fn = name;
616 int len;
617
618 /* make sure name is valid */
619 if (!name || !strlen(name)) goto out;
620
621 if (xlate_proc_name(name, parent, &fn) != 0)
622 goto out;
623
624 /* At this point there must not be any '/' characters beyond *fn */
625 if (strchr(fn, '/'))
626 goto out;
627
628 len = strlen(fn);
629
630 ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
631 if (!ent) goto out;
632
633 memset(ent, 0, sizeof(struct proc_dir_entry));
634 memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
635 ent->name = ((char *) ent) + sizeof(*ent);
636 ent->namelen = len;
637 ent->mode = mode;
638 ent->nlink = nlink;
639 atomic_set(&ent->count, 1);
640 ent->pde_users = 0;
641 spin_lock_init(&ent->pde_unload_lock);
642 ent->pde_unload_completion = NULL;
643 INIT_LIST_HEAD(&ent->pde_openers);
644 out:
645 return ent;
646 }
647
648 struct proc_dir_entry *proc_symlink(const char *name,
649 struct proc_dir_entry *parent, const char *dest)
650 {
651 struct proc_dir_entry *ent;
652
653 ent = __proc_create(&parent, name,
654 (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
655
656 if (ent) {
657 ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
658 if (ent->data) {
659 strcpy((char*)ent->data,dest);
660 if (proc_register(parent, ent) < 0) {
661 kfree(ent->data);
662 kfree(ent);
663 ent = NULL;
664 }
665 } else {
666 kfree(ent);
667 ent = NULL;
668 }
669 }
670 return ent;
671 }
672 EXPORT_SYMBOL(proc_symlink);
673
674 struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
675 struct proc_dir_entry *parent)
676 {
677 struct proc_dir_entry *ent;
678
679 ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
680 if (ent) {
681 if (proc_register(parent, ent) < 0) {
682 kfree(ent);
683 ent = NULL;
684 }
685 }
686 return ent;
687 }
688
689 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
690 struct proc_dir_entry *parent)
691 {
692 struct proc_dir_entry *ent;
693
694 ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
695 if (ent) {
696 ent->data = net;
697 if (proc_register(parent, ent) < 0) {
698 kfree(ent);
699 ent = NULL;
700 }
701 }
702 return ent;
703 }
704 EXPORT_SYMBOL_GPL(proc_net_mkdir);
705
/* Create a /proc directory with the default r-x permissions for everyone. */
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);
712
713 struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
714 struct proc_dir_entry *parent)
715 {
716 struct proc_dir_entry *ent;
717 nlink_t nlink;
718
719 if (S_ISDIR(mode)) {
720 if ((mode & S_IALLUGO) == 0)
721 mode |= S_IRUGO | S_IXUGO;
722 nlink = 2;
723 } else {
724 if ((mode & S_IFMT) == 0)
725 mode |= S_IFREG;
726 if ((mode & S_IALLUGO) == 0)
727 mode |= S_IRUGO;
728 nlink = 1;
729 }
730
731 ent = __proc_create(&parent, name, mode, nlink);
732 if (ent) {
733 if (proc_register(parent, ent) < 0) {
734 kfree(ent);
735 ent = NULL;
736 }
737 }
738 return ent;
739 }
740 EXPORT_SYMBOL(create_proc_entry);
741
742 struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
743 struct proc_dir_entry *parent,
744 const struct file_operations *proc_fops,
745 void *data)
746 {
747 struct proc_dir_entry *pde;
748 nlink_t nlink;
749
750 if (S_ISDIR(mode)) {
751 if ((mode & S_IALLUGO) == 0)
752 mode |= S_IRUGO | S_IXUGO;
753 nlink = 2;
754 } else {
755 if ((mode & S_IFMT) == 0)
756 mode |= S_IFREG;
757 if ((mode & S_IALLUGO) == 0)
758 mode |= S_IRUGO;
759 nlink = 1;
760 }
761
762 pde = __proc_create(&parent, name, mode, nlink);
763 if (!pde)
764 goto out;
765 pde->proc_fops = proc_fops;
766 pde->data = data;
767 if (proc_register(parent, pde) < 0)
768 goto out_free;
769 return pde;
770 out_free:
771 kfree(pde);
772 out:
773 return NULL;
774 }
775 EXPORT_SYMBOL(proc_create_data);
776
/*
 * Final teardown once the refcount hits zero (see pde_put()): return
 * the dynamic inode number and free the allocation, plus the
 * kmalloc'd target string for symlinks.
 */
static void free_proc_entry(struct proc_dir_entry *de)
{
	unsigned int ino = de->low_ino;

	/* entries below PROC_DYNAMIC_FIRST never got a dynamic inode number */
	if (ino < PROC_DYNAMIC_FIRST)
		return;

	release_inode_number(ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}
790
/* Drop a reference; frees the entry when the last reference goes away. */
void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}
796
/*
 * Remove a /proc entry and free it if it's not currently in use.
 *
 * Sequence: unlink the entry from its parent under proc_subdir_lock,
 * block new callers by clearing ->proc_fops, wait for in-flight
 * read/write callers (pde_users) to drain, force-release any
 * remaining openers, then drop the creation reference.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	/* unlink the matching child from parent's sibling list */
	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		/*
		 * NOTE(review): presumably pde_users_dec() (defined
		 * elsewhere) completes this when the count hits zero
		 * -- verify against fs/proc/inode.c.
		 */
		wait_for_completion(de->pde_unload_completion);

		goto continue_removing;
	}
	spin_unlock(&de->pde_unload_lock);

continue_removing:
	/* force-release anyone who still holds the file open */
	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		/* drop the lock around the driver's ->release() */
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	/* children should have been removed first; anything left is leaked */
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	pde_put(de);	/* drop the reference taken at creation */
}
EXPORT_SYMBOL(remove_proc_entry);