/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our max() is unusable in constant expressions ;-/ */
#define __const_max(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;
static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
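
/*
 * With 4K pages and PAGE_ALLOC_COSTLY_ORDER == 3, the kmalloc() path above
 * is tried for allocations up to 32KB; anything larger goes straight to
 * vmalloc(), since high-order page allocations are likely to fail or to
 * trigger expensive reclaim.
 */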
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}
static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
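
/*
 * full_fds_bits is a second-level bitmap: one bit per BITS_PER_LONG-sized
 * word of open_fds, set when that word is completely full.  For a 1024-fd
 * table on 64-bit, open_fds is 16 longs and full_fds_bits collapses those
 * into a single long, so find_next_fd() can skip fully-populated regions
 * a word at a time.
 */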
/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);

	cpy = BITBIT_SIZE(ofdt->max_fds);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset(cpy + (char *)nfdt->full_fds_bits, 0, set);
}
static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data.  We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
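	/*
	 * Worked example on 64-bit (sizeof(struct file *) == 8, so 128
	 * slots per 1024 bytes): nr == 300 gives 300/128 = 2, then
	 * roundup_pow_of_two(3) = 4, then 4*128 = 512 slots, i.e. a 4KB
	 * fd array.
	 */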
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
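
/*
 * The three bitmaps above share one allocation, laid out back to back:
 *
 *	open_fds:      nr / BITS_PER_BYTE bytes
 *	close_on_exec: nr / BITS_PER_BYTE bytes
 *	full_fds_bits: BITBIT_SIZE(nr) bytes
 *
 * which is why the single kvfree(fdt->open_fds) in __free_fdtable()
 * releases all of them.
 */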
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all __fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_sched();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in __fd_install() */
	smp_wmb();
	return 1;
}
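
/*
 * Note that the embedded files->fdtab is freed together with the
 * files_struct itself, which is why call_rcu() is only used for
 * dynamically allocated tables.  Readers that found the old table under
 * rcu_read_lock_sched() keep a valid pointer until the grace period ends.
 */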
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}
static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
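
/*
 * Invariant maintained by the helpers above: bit N of full_fds_bits is set
 * iff word N of open_fds is all ones.  Setting an fd only marks its word
 * full once the word becomes all ones; clearing an fd can unconditionally
 * clear the full bit, since the word can no longer be full.
 */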
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * who knows, it may have a new, bigger fd table by now.
		 * We need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
	memcpy(new_fdt->full_fds_bits, old_fdt->full_fds_bits, BITBIT_SIZE(open_files));

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}
void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
{
	unsigned long maxfd = fdt->max_fds;
	unsigned long maxbit = maxfd / BITS_PER_LONG;
	unsigned long bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
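
/*
 * The search above is two-level: first scan full_fds_bits for a word of
 * open_fds that still has a free bit (skipping BITS_PER_LONG descriptors
 * per step), then do an ordinary find_next_zero_bit() from the better of
 * that position and the caller's start.  This keeps fd allocation cheap
 * even in tables with very large numbers of open descriptors.
 */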
/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}
static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;

	might_sleep();
	rcu_read_lock_sched();

	while (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		wait_event(files->resize_wait, !files->resize_in_progress);
		rcu_read_lock_sched();
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
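
/*
 * The usual pattern on the allocation side, sketched roughly (error
 * handling elided): reserve a slot first, fully set up the struct file,
 * and only then publish it, so other threads never observe a half-built
 * file through a shared table:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = ...;			// set up the new struct file
 *	fd_install(fd, file);
 *	return fd;
 */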
/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
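
/*
 * __fget_light() packs its result into one unsigned long: the struct file
 * pointer (at least 4-byte aligned, so the low two bits are free) plus
 * FDPUT_FPUT when the reference must be dropped with fput().  __fdget_pos()
 * below adds FDPUT_POS_UNLOCK in the second low bit; the fdget()/fdput()
 * wrappers in <linux/file.h> unpack this encoding.
 */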
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);