/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */
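/*
 * sysctl_nr_open is the per-process limit on how large the fd table may
 * grow (exposed as the fs.nr_open sysctl); sysctl_nr_open_min and
 * sysctl_nr_open_max bound the values an administrator may write there.
 */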
static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}
static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}
static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
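	/*
	 * Worked example, assuming 8-byte struct file pointers: the fdarray
	 * grows in units of 1024/8 = 128 slots, so a request for 300 slots
	 * becomes 300/128 = 2, roundup_pow_of_two(2 + 1) = 4, 4*128 = 512.
	 */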
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
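	/*
	 * ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1 is sysctl_nr_open
	 * rounded up to the next multiple of BITS_PER_LONG, preserving the
	 * invariant described above.
	 */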
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	data = alloc_fdmem(nr * sizeof(struct file *));
	data = alloc_fdmem(max_t(size_t, 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
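	/*
	 * Both bitmaps live in the single allocation made above: open_fds
	 * takes the first nr/BITS_PER_BYTE bytes and close_on_exec the rest,
	 * which is why freeing fdt->open_fds releases both of them.
	 */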
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;
	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
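	/*
	 * The lock is dropped here because alloc_fdtable() performs
	 * GFP_KERNEL allocations that may sleep; anything read from the old
	 * table before the unlock must therefore be re-checked below.
	 */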
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt != &files->fdtab)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
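		/*
		 * Lockless readers (fcheck_files() and friends under
		 * rcu_read_lock()) may still be walking the old table, so it
		 * can only be freed after an RCU grace period; the fdtable
		 * embedded in files_struct is never freed separately.
		 */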
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}
/*
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
}

static inline void __clear_open_fd(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
}
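/*
 * These helpers use the non-atomic __set_bit()/__clear_bit() variants:
 * callers either hold files->file_lock or operate on a table that is not
 * yet visible to other tasks (as in dup_fd() below).
 */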
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
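/*
 * Note that the result is rounded up to a whole bitmap word: with
 * BITS_PER_LONG == 64, a highest open fd of 70 makes this report 128.
 */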
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
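	/*
	 * The fdtable embedded in files_struct covers the first
	 * NR_OPEN_DEFAULT descriptors without any extra allocation; a larger
	 * table is only switched in below if the source process needs one.
	 */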
	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);
	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);
		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}
		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have acquired a new, bigger fd table in the
		 * meantime, so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}
	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;
	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
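	/*
	 * open_files is a multiple of BITS_PER_LONG (see count_open_files()),
	 * so dividing by 8 above copies whole bitmap words.
	 */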
	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);
	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);
	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}
	rcu_assign_pointer(newf->fdt, new_fdt);
	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
	return NULL;
}
static void close_files(struct files_struct * files)
{
	int i, j = 0;
	struct fdtable *fdt;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;

		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}
void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		/* not really needed, since nobody can see us */
		fdt = files_fdtable(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
void __init files_defer_init(void)
{
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}
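/*
 * I.e. cap nr_open at INT_MAX and at the number of pointers that fit in a
 * size_t, rounded down to a multiple of BITS_PER_LONG.
 */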
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
#define FD_CHECK_NAME_SIZE 256

/* Radix tree and list used to build the set of names behind the open fds */
static RADIX_TREE(over_fd_tree, GFP_KERNEL);
static LIST_HEAD(fd_listhead);
static DEFINE_MUTEX(over_fd_mutex);
struct over_fd_entry {
	int num_of_fd;
	char name[FD_CHECK_NAME_SIZE];
	unsigned int hash;
	struct list_head fd_link;
};
/*
 * Get the file name behind an fd value.
 */
long get_file_name_from_fd(struct files_struct *files, int fd, int procid,
			   struct over_fd_entry *res_name)
	spin_lock(&files->file_lock);
		spin_unlock(&files->file_lock);
	path_get(&file->f_path);
	spin_unlock(&files->file_lock);
	tmp = (char *)__get_free_page(GFP_TEMPORARY);
	pathname = d_path(&path, tmp, PAGE_SIZE);

	if (IS_ERR(pathname)) {
		free_page((unsigned long)tmp);
		return PTR_ERR(pathname);
	}
	/* do something here with pathname */
	strncpy(res_name->name, pathname, FD_CHECK_NAME_SIZE - 1);

	free_page((unsigned long)tmp);
unsigned int get_hash(char *name)
{
	return full_name_hash(name, strlen(name));
}
static struct over_fd_entry *fd_lookup(unsigned int hash)
{
	return radix_tree_lookup(&over_fd_tree, hash);
}
static void fd_insert(struct over_fd_entry *entry)
{
	unsigned int hash = get_hash(entry->name);
	struct over_fd_entry *find_entry = fd_lookup(hash);

	if (!find_entry) {
		/* Not found: add the new element */
		entry->num_of_fd = 1;
		list_add_tail(&entry->fd_link, &fd_listhead);
		radix_tree_insert(&over_fd_tree, hash, (void *)entry);
	} else {
		/* Entry already exists: just bump its count */
		find_entry->num_of_fd = find_entry->num_of_fd + 1;
	}
}
static void fd_delete(unsigned int hash)
{
	radix_tree_delete(&over_fd_tree, hash);
}
void fd_show_open_files(pid_t pid, struct files_struct *files, struct fdtable *fdt)
{
	int i;
	long result;
	int num_of_entry;
	struct over_fd_entry *lentry;
	int sum_fds_of_pid = 0;

	mutex_lock(&over_fd_mutex);
	/* printk(KERN_ERR "(PID:%d)Max FD Number:%d", current->pid, fdt->max_fds); */
	for (i = 0; i < fdt->max_fds; i++) {
		struct over_fd_entry *entry =
			kzalloc(sizeof(struct over_fd_entry), GFP_KERNEL);

			pr_debug("[FD_LEAK](PID:%d)Empty FD:%d", pid, i);

		memset(entry->name, 0, sizeof(entry->name));
		result = get_file_name_from_fd(files, i, pid, entry);
	if (!list_empty(&fd_listhead)) {
		lentry = list_entry((&fd_listhead)->next,
				    struct over_fd_entry, fd_link);
		num_of_entry = lentry->num_of_fd;
		if (lentry != NULL && lentry->name != NULL)
			pr_debug("[FD_LEAK]OverAllocFDError(PID:%d fileName:%s Num:%d)\n",
				 pid, lentry->name, num_of_entry);
		else
			pr_debug("[FD_LEAK]OverAllocFDError(PID:%d fileName:%s Num:%d)\n",
				 pid, "NULL", num_of_entry);
		list_del((&fd_listhead)->next);
		fd_delete(lentry->hash);
	}
	pr_debug("[FD_LEAK]OverAllocFDError(PID:%d totalFDs:%d)\n", pid, sum_fds_of_pid);

	mutex_unlock(&over_fd_mutex);
}
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;
<= files
->next_fd
)
632 files
->next_fd
= fd
+ 1;
634 __set_open_fd(fd
, fdt
);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
out:
	spin_unlock(&files->file_lock);
	if (error == -EMFILE) {
		static int dump_current_open_files = 0;

		if (!dump_current_open_files &&
		    strcmp(current->comm, "Backbone")) {
			/* add Backbone to the FD white list for skype */
			dump_current_open_files = 0x1;
			pr_debug("[FD_LEAK](PID:%d)fd over RLIMIT_NOFILE:%ld",
				 current->pid, rlimit(RLIMIT_NOFILE));
			fd_show_open_files(current->pid, files, fdt);
		}
	}

	return error;
}
static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}
int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
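/*
 * Typical usage (sketch): pair an fd allocated by get_unused_fd_flags()
 * with a struct file obtained elsewhere.  How the file is obtained is up
 * to the caller and only illustrative here.
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd >= 0) {
 *		if (IS_ERR(file)) {
 *			put_unused_fd(fd);
 *			fd = PTR_ERR(file);
 *		} else {
 *			fd_install(fd, file);
 *		}
 *	}
 */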
/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}
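/*
 * FMODE_PATH marks files opened with O_PATH; fget() refuses to hand them
 * out so ordinary callers never see such a file, while fget_raw() below
 * accepts them.
 */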
struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget_raw);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
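/*
 * Typical usage (sketch), observing the rules above:
 *
 *	int fput_needed;
 *	struct file *file = fget_light(fd, &fput_needed);
 *
 *	if (!file)
 *		return -EBADF;
 *	... use file ...
 *	fput_light(file, fput_needed);
 */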
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}
EXPORT_SYMBOL(fget_light);
struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 *
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;
	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;
	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;
	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;
	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;

	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
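/*
 * Example callback for iterate_fd() (sketch; count_one is illustrative and
 * not part of this file): count the open files of the current task.  A
 * nonzero return from the callback stops the walk and becomes iterate_fd()'s
 * return value.
 *
 *	static int count_one(const void *p, struct file *file, unsigned fd)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iterate_fd(current->files, 0, count_one, &count);
 */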