/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

#if defined(CONFIG_SEC_FD_DETECT)
extern void save_open_close_fdinfo(int fd, int flag, struct task_struct *cur,
				   struct files_struct *files);
extern void check_fd_invalid_close(int fd, struct task_struct *cur,
				   struct files_struct *files, struct file *file);
#endif // END CONFIG_SEC_FD_DETECT

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our max() is unusable in constant expressions ;-/ */
#define __const_max(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;

#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
extern void sec_debug_EMFILE_error_proc(unsigned long files_addr);
#endif

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

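/*
 * For reference (added, not in the original source): with 4 KiB pages and
 * the usual PAGE_ALLOC_COSTLY_ORDER of 3, the kmalloc() path above handles
 * requests up to 4096 << 3 = 32 KiB; anything larger falls back to vmalloc().
 */
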
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

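/*
 * Worked example (added for illustration, not in the original source): with
 * 64-bit longs, a table of nr = 1024 fds needs BITS_TO_LONGS(1024) = 16
 * longs of open_fds bitmap, and BITBIT_NR(1024) = BITS_TO_LONGS(16) = 1 long
 * of full_fds_bits - i.e. one "summary" bit per word of the open_fds bitmap.
 */
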
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
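
	/*
	 * Worked example (added for illustration, not part of the original
	 * source): on a 64-bit kernel sizeof(struct file *) is 8, so 1024B
	 * holds 128 slots. A request for nr = 300 becomes 300/128 = 2,
	 * roundup_pow_of_two(3) = 4, then 4 * 128 = 512 slots - the next
	 * power-of-two sized, page-friendly chunk.
	 */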

	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
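
	/*
	 * Illustration (added, not in the original source): the expression
	 * above rounds sysctl_nr_open up to the next multiple of BITS_PER_LONG,
	 * e.g. with BITS_PER_LONG = 64 and sysctl_nr_open = 1000:
	 * ((1000 - 1) | 63) + 1 = (999 | 63) + 1 = 1023 + 1 = 1024.
	 */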

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;

	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1) {
		synchronize_sched();
	}

	spin_lock(&files->file_lock);

	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
		sec_debug_EMFILE_error_proc((unsigned long)files);
#endif
		__free_fdtable(new_fdt);
		return -EMFILE;
	}

	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}

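/*
 * Note added for illustration (not part of the original source): the
 * smp_wmb() above pairs with the smp_rmb() in __fd_install() below. Once a
 * lockless installer gets past the resize_in_progress check, the barrier
 * pair is meant to guarantee that it also observes the newly published
 * fdtable rather than the old, about-to-be-freed one.
 */
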
/*
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	if (nr >= sysctl_nr_open) {
#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
		sec_debug_EMFILE_error_proc((unsigned long)files);
#endif
		return -EMFILE;
	}

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

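/*
 * Example (added for illustration, not from the original source): if the
 * highest open descriptor is fd 70, the loop above stops at the second
 * bitmap word, so the function returns (1 + 1) * 64 = 128 on a 64-bit
 * kernel - the count is always rounded up to a whole open_fds word.
 */
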
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
			sec_debug_EMFILE_error_proc((unsigned long)oldf);
#endif
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table
		 * who knows it may have a new bigger fd table. We need
		 * the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);
	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
{
	unsigned long maxfd = fdt->max_fds;
	unsigned long maxbit = maxfd / BITS_PER_LONG;
	unsigned long bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

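/*
 * Worked example (added for illustration, not part of the original source):
 * with 64-bit words, if fds 0-199 are all open and the search starts at fd 3,
 * full_fds_bits has bits 0-2 set (words 0-2 of open_fds are full). The first
 * scan therefore jumps bitbit to word 3, i.e. fd 192, and the second scan
 * over open_fds finds fd 200 without walking the three full words bit by bit.
 */
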
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end) {
#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
		sec_debug_EMFILE_error_proc((unsigned long)files);
#endif
		goto out;
	}

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);

	error = fd;
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
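
/*
 * Typical usage sketch (added for illustration, not part of the original
 * source): callers normally pair descriptor allocation with fd_install(),
 * e.g.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	fd_install(fd, file);
 *	return fd;
 *
 * publishing the fd to userspace only once the struct file is in place.
 */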
void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;

	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);

#if defined(CONFIG_SEC_FD_DETECT)
	save_open_close_fdinfo(fd, true, current, files);
#endif // END CONFIG_SEC_FD_DETECT
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;

#if defined(CONFIG_SEC_FD_DETECT)
	check_fd_invalid_close(fd, current, files, file);
	save_open_close_fdinfo(fd, false, current, files);
#endif // END CONFIG_SEC_FD_DETECT

	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
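
/*
 * Usage sketch (added for illustration, not part of the original source):
 * most callers reach __fget_light() through the fdget()/fdput() helpers
 * declared in <linux/file.h>, e.g.
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	...use f.file...
 *	fdput(f);
 *
 * fdput() only drops a reference if one was actually taken here.
 */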
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
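
/*
 * Encoding note (added for illustration, not from the original source):
 * struct file allocations are at least 4-byte aligned, so the two low bits
 * of the value returned by __fget_light()/__fdget() are free to carry the
 * FDPUT_FPUT and FDPUT_POS_UNLOCK flags; __fdget_pos() above recovers the
 * pointer itself with (struct file *)(v & ~3).
 */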
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 *
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE)) {
#ifdef CONFIG_SEC_DEBUG_FILE_LEAK
		sec_debug_EMFILE_error_proc((unsigned long)files);
#endif
		return -EBADF;
	}

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);